use bitcoin::secp256k1;
use chain;
-use chain::Confirm;
-use chain::Watch;
-use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
+use chain::{Confirm, Watch, BestBlock};
+use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
-// Since this struct is returned in `list_channels` methods, expose it here in case users want to
-// construct one themselves.
use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
use util::config::UserConfig;
-use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use util::{byte_utils, events};
-use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
+use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
-use util::logger::Logger;
+use util::logger::{Logger, Level};
use util::errors::APIError;
-use std::{cmp, mem};
-use std::collections::{HashMap, hash_map, HashSet};
-use std::io::{Cursor, Read};
-use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::Duration;
+use io;
+use prelude::*;
+use core::{cmp, mem};
+use core::cell::RefCell;
+use io::{Cursor, Read};
+use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::time::Duration;
#[cfg(any(test, feature = "allow_wallclock_use"))]
use std::time::Instant;
-use std::ops::Deref;
-use bitcoin::hashes::hex::ToHex;
+use core::ops::Deref;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
payment_data: msgs::FinalOnionHopData,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
},
+ ReceiveKeysend {
+ payment_preimage: PaymentPreimage,
+ incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
+ },
}
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
outpoint: OutPoint,
}
-struct ClaimableHTLC {
- prev_hop: HTLCPreviousHopData,
- value: u64,
+enum OnionPayload {
/// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
/// payment_secret which prevents path-probing attacks and can associate different HTLCs which
/// are part of the same payment.
- payment_data: msgs::FinalOnionHopData,
+ Invoice(msgs::FinalOnionHopData),
+ /// Contains the payer-provided preimage.
+ Spontaneous(PaymentPreimage),
+}
+
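+// A hedged sketch of how a claim path might branch on the payload kinds above
+// (`htlc` is a hypothetical `ClaimableHTLC`, defined next):
+//
+// match htlc.onion_payload {
+//     OnionPayload::Invoice(ref payment_data) => { /* verify payment_secret / total_msat */ },
+//     OnionPayload::Spontaneous(payment_preimage) => { /* keysend: the preimage itself proves payment */ },
+// }
+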
+struct ClaimableHTLC {
+ prev_hop: HTLCPreviousHopData,
cltv_expiry: u32,
+ value: u64,
+ onion_payload: OnionPayload,
+}
+
+/// A payment identifier used to uniquely identify a payment to LDK.
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
+pub struct PaymentId(pub [u8; 32]);
+
+impl Writeable for PaymentId {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.0.write(w)
+ }
+}
+impl Readable for PaymentId {
+ fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let buf: [u8; 32] = Readable::read(r)?;
+ Ok(PaymentId(buf))
+ }
+}
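+
+// A hedged round-trip sketch for the ser impls above (`Cursor` is imported from
+// `io`; `encode` is the `Writeable`-provided helper returning a `Vec<u8>`):
+//
+// let id = PaymentId([42; 32]);
+// let bytes = id.encode();
+// let read_id: PaymentId = Readable::read(&mut Cursor::new(bytes)).unwrap();
+// assert_eq!(id, read_id);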
/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, PartialEq)]
pub(crate) enum HTLCSource {
/// Technically we can recalculate this from the route, but we cache it here to avoid
/// doing a double-pass on route when we get a failure back
first_hop_htlc_msat: u64,
+ payment_id: PaymentId,
},
}
#[cfg(test)]
path: Vec::new(),
session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
first_hop_htlc_msat: 0,
+ payment_id: PaymentId([2; 32]),
}
}
}
}
}
+/// Return value for claim_funds_from_hop
+enum ClaimFundsFromHop {
+ PrevHopForceClosed,
+ MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
+ Success(u64),
+ DuplicateClaim,
+}
+
type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
struct MsgHandleErrInternal {
err: msgs::LightningError,
+ chan_id: Option<[u8; 32]>, // If Some, a channel of ours has been closed
shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
},
},
},
+ chan_id: None,
shutdown_finish: None,
}
}
err,
action: msgs::ErrorAction::IgnoreError,
},
+ chan_id: None,
shutdown_finish: None,
}
}
#[inline]
fn from_no_close(err: msgs::LightningError) -> Self {
- Self { err, shutdown_finish: None }
+ Self { err, chan_id: None, shutdown_finish: None }
}
#[inline]
fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
},
},
},
+ chan_id: Some(channel_id),
shutdown_finish: Some((shutdown_res, channel_update)),
}
}
fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
Self {
err: match err {
+ ChannelError::Warn(msg) => LightningError {
+ err: msg,
+ action: msgs::ErrorAction::IgnoreError,
+ },
ChannelError::Ignore(msg) => LightningError {
err: msg,
action: msgs::ErrorAction::IgnoreError,
},
},
},
+ chan_id: None,
shutdown_finish: None,
}
}
min_value_msat: Option<u64>,
}
+/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
+/// and later, also stores information for retrying the payment.
+pub(crate) enum PendingOutboundPayment {
+ Legacy {
+ session_privs: HashSet<[u8; 32]>,
+ },
+ Retryable {
+ session_privs: HashSet<[u8; 32]>,
+ payment_hash: PaymentHash,
+ payment_secret: Option<PaymentSecret>,
+ pending_amt_msat: u64,
+ /// The total payment amount across all paths, used to verify that a retry is not overpaying.
+ total_msat: u64,
+ /// Our best known block height at the time this payment was initiated.
+ starting_block_height: u32,
+ },
+}
+
+impl PendingOutboundPayment {
+ fn remove(&mut self, session_priv: &[u8; 32], part_amt_msat: u64) -> bool {
+ let remove_res = match self {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } => {
+ session_privs.remove(session_priv)
+ }
+ };
+ if remove_res {
+ if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self {
+ *pending_amt_msat -= part_amt_msat;
+ }
+ }
+ remove_res
+ }
+
+ fn insert(&mut self, session_priv: [u8; 32], part_amt_msat: u64) -> bool {
+ let insert_res = match self {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } => {
+ session_privs.insert(session_priv)
+ }
+ };
+ if insert_res {
+ if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self {
+ *pending_amt_msat += part_amt_msat;
+ }
+ }
+ insert_res
+ }
+
+ fn remaining_parts(&self) -> usize {
+ match self {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } => {
+ session_privs.len()
+ }
+ }
+ }
+}
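+
+// A hedged sketch of the part-tracking above; field values are placeholders:
+//
+// let mut payment = PendingOutboundPayment::Retryable {
+//     session_privs: HashSet::new(),
+//     payment_hash: PaymentHash([0; 32]),
+//     payment_secret: None,
+//     pending_amt_msat: 0,
+//     total_msat: 3_000,
+//     starting_block_height: 654_321,
+// };
+// assert!(payment.insert([1; 32], 1_000)); // pending_amt_msat == 1_000
+// assert!(payment.insert([2; 32], 2_000)); // pending_amt_msat == 3_000
+// assert!(payment.remove(&[1; 32], 1_000)); // part resolved; pending_amt_msat == 2_000
+// assert_eq!(payment.remaining_parts(), 1);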
+
/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
/// Locked *after* channel_state.
pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
- /// The session_priv bytes of outbound payments which are pending resolution.
+ /// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
/// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
/// (if the channel has been force-closed), however we track them here to prevent duplicative
- /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative
+ /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
/// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
/// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
/// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
/// after reloading from disk while replaying blocks against ChannelMonitors.
///
+ /// See `PendingOutboundPayment` documentation for more info.
+ ///
/// Locked *after* channel_state.
- pending_outbound_payments: Mutex<HashSet<[u8; 32]>>,
+ pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
our_network_key: SecretKey,
our_network_pubkey: PublicKey,
/// Because adding or removing an entry is rare, we usually take an outer read lock and then
/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
/// new channel.
+ ///
+ /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
pending_events: Mutex<Vec<events::Event>>,
/// Typically, the block-specific parameters are derived from the best block hash for the network,
/// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
/// are not needed when deserializing a previously constructed `ChannelManager`.
+#[derive(Clone, Copy, PartialEq)]
pub struct ChainParameters {
/// The network for determining the `chain_hash` in Lightning messages.
pub network: Network,
pub best_block: BestBlock,
}
-/// The best known block as identified by its hash and height.
-#[derive(Clone, Copy)]
-pub struct BestBlock {
- block_hash: BlockHash,
- height: u32,
-}
-
-impl BestBlock {
- /// Returns the best block from the genesis of the given network.
- pub fn from_genesis(network: Network) -> Self {
- BestBlock {
- block_hash: genesis_block(network).header.block_hash(),
- height: 0,
- }
- }
-
- /// Returns the best block as identified by the given block hash and height.
- pub fn new(block_hash: BlockHash, height: u32) -> Self {
- BestBlock { block_hash, height }
- }
-
- /// Returns the best block hash.
- pub fn block_hash(&self) -> BlockHash { self.block_hash }
-
- /// Returns the best block height.
- pub fn height(&self) -> u32 { self.height }
-}
-
#[derive(Copy, Clone, PartialEq)]
enum NotifyOption {
DoPersist,
const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
-// ChannelMontior::would_broadcast_at_height for a description of why this is needed.
+// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+/// Information needed for constructing an invoice route hint for this channel.
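+///
+/// As a hedged sketch, the standard BOLT 7 fee to forward `amt_msat` through the
+/// counterparty combines these fields roughly as follows (an illustrative helper,
+/// not part of this struct's API):
+///
+/// ```ignore
+/// fn forwarding_fee_msat(info: &CounterpartyForwardingInfo, amt_msat: u64) -> u64 {
+///     info.fee_base_msat as u64
+///         + amt_msat * info.fee_proportional_millionths as u64 / 1_000_000
+/// }
+/// ```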
+#[derive(Clone, Debug, PartialEq)]
+pub struct CounterpartyForwardingInfo {
+ /// Base routing fee in millisatoshis.
+ pub fee_base_msat: u32,
+ /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
+ pub fee_proportional_millionths: u32,
+ /// The minimum difference in cltv_expiry between an inbound HTLC and its outgoing counterpart,
+ /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
+ /// `cltv_expiry_delta` for more details.
+ pub cltv_expiry_delta: u16,
+}
+
+/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
+/// to better separate parameters.
+#[derive(Clone, Debug, PartialEq)]
+pub struct ChannelCounterparty {
+ /// The node_id of our counterparty
+ pub node_id: PublicKey,
+ /// The Features the channel counterparty provided upon last connection.
+ /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
+ /// many routing-relevant features are present in the init context.
+ pub features: InitFeatures,
+ /// The value, in satoshis, that must always be held in the channel for our counterparty. This
+ /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
+ /// claiming at least this value on chain.
+ ///
+ /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
+ ///
+ /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
+ pub unspendable_punishment_reserve: u64,
+ /// Information on the fees and requirements that the counterparty requires when forwarding
+ /// payments to us through this channel.
+ pub forwarding_info: Option<CounterpartyForwardingInfo>,
+}
+
/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
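+///
+/// A hedged usage sketch (assumes `manager` is a `ChannelManager`): pick channels
+/// with room for a 10_000 msat payment:
+///
+/// ```ignore
+/// let candidates: Vec<ChannelDetails> = manager.list_usable_channels()
+///     .into_iter()
+///     .filter(|c| c.outbound_capacity_msat >= 10_000)
+///     .collect();
+/// ```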
-#[derive(Clone)]
+#[derive(Clone, Debug, PartialEq)]
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
/// thereafter this is the txid of the funding transaction xor the funding transaction output).
/// Note that this means this value is *not* persistent - it can change once during the
/// lifetime of the channel.
pub channel_id: [u8; 32],
+ /// Parameters which apply to our counterparty. See individual fields for more information.
+ pub counterparty: ChannelCounterparty,
/// The Channel's funding transaction output, if we've negotiated the funding transaction with
/// our counterparty already.
///
/// The position of the funding transaction in the chain. None if the funding transaction has
/// not yet been confirmed and the channel fully opened.
pub short_channel_id: Option<u64>,
- /// The node_id of our counterparty
- pub remote_network_id: PublicKey,
- /// The Features the channel counterparty provided upon last connection.
- /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
- /// many routing-relevant features are present in the init context.
- pub counterparty_features: InitFeatures,
/// The value, in satoshis, of this channel as appears in the funding output
pub channel_value_satoshis: u64,
+ /// The value, in satoshis, that must always be held in the channel for us. This value ensures
+ /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
+ /// this value on chain.
+ ///
+ /// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
+ ///
+ /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+ ///
+ /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
+ pub unspendable_punishment_reserve: Option<u64>,
/// The user_id passed in to create_channel, or 0 if the channel was inbound.
pub user_id: u64,
/// The available outbound capacity for sending HTLCs to the remote peer. This does not include
/// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new outbound HTLCs). This further does not include any pending
/// outgoing HTLCs which are awaiting some other resolution to be sent.
+ ///
+ /// This value is not exact. Due to various in-flight changes, feerate changes, and our
+ /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
+ /// should be able to spend nearly this amount.
pub outbound_capacity_msat: u64,
/// The available inbound capacity for the remote peer to send HTLCs to us. This does not
/// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
/// available for inclusion in new inbound HTLCs).
/// Note that there are some corner cases not fully handled here, so the actual available
/// inbound capacity may be slightly higher than this.
+ ///
+ /// This value is not exact. Due to various in-flight changes, feerate changes, and our
+ /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
+ /// However, our counterparty should be able to spend nearly this amount.
pub inbound_capacity_msat: u64,
+ /// The number of required confirmations on the funding transaction before the funding will be
+ /// considered "locked". This number is selected by the channel fundee (i.e. us if
+ /// [`is_outbound`] is *not* set), and can be selected for inbound channels with
+ /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
+ /// [`ChannelHandshakeLimits::max_minimum_depth`].
+ ///
+ /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+ ///
+ /// [`is_outbound`]: ChannelDetails::is_outbound
+ /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
+ /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
+ pub confirmations_required: Option<u32>,
+ /// The number of blocks (after our commitment transaction confirms) that we will need to wait
+ /// until we can claim our funds after we force-close the channel. During this time our
+ /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
+ /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any
+ /// time to claim our non-HTLC-encumbered funds.
+ ///
+ /// This value will be `None` for outbound channels until the counterparty accepts the channel.
+ pub force_close_spend_delay: Option<u16>,
/// True if the channel was initiated (and thus funded) by us.
pub is_outbound: bool,
/// True if the channel is confirmed, funding_locked messages have been exchanged, and the
/// channel is not currently being shut down. `funding_locked` message exchange implies the
/// required confirmation count has been reached (and we were connected to the peer at some
- /// point after the funding transaction received enough confirmations).
+ /// point after the funding transaction received enough confirmations). The required
+ /// confirmation count is provided in [`confirmations_required`].
+ ///
+ /// [`confirmations_required`]: ChannelDetails::confirmations_required
pub is_funding_locked: bool,
/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
- /// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
- /// channel is not currently negotiating a shutdown.
+ /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
///
/// This is a strict superset of `is_funding_locked`.
pub is_usable: bool,
/// True if this channel is (or will be) publicly-announced.
pub is_public: bool,
- /// Information on the fees and requirements that the counterparty requires when forwarding
- /// payments to us through this channel.
- pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
}
/// If a payment fails to send, it can be in one of several states. This enum is returned as the
($self: ident, $internal: expr, $counterparty_node_id: expr) => {
match $internal {
Ok(msg) => Ok(msg),
- Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+ Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
#[cfg(debug_assertions)]
{
// In testing, ensure there are no deadlocks where the lock is already held upon
// entering the macro.
assert!($self.channel_state.try_lock().is_ok());
+ assert!($self.pending_events.try_lock().is_ok());
}
let mut msg_events = Vec::with_capacity(2);
msg: update
});
}
+ if let Some(channel_id) = chan_id {
+ $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } });
+ }
}
log_error!($self.logger, "{}", err.err);
}
}
+/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
+macro_rules! convert_chan_err {
+ ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+ match $err {
+ ChannelError::Warn(msg) => {
+ // TODO: Once warning messages are merged, we should send a `warning` message to our
+ // peer here.
+ (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+ },
+ ChannelError::Ignore(msg) => {
+ (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+ },
+ ChannelError::Close(msg) => {
+ log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+ if let Some(short_id) = $channel.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
+ }
+ let shutdown_res = $channel.force_shutdown(true);
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+ },
+ ChannelError::CloseDelayBroadcast(msg) => {
+ log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
+ if let Some(short_id) = $channel.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
+ }
+ let shutdown_res = $channel.force_shutdown(false);
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+ }
+ }
+ }
+}
+
macro_rules! break_chan_entry {
($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
match $res {
Ok(res) => res,
- Err(ChannelError::Ignore(msg)) => {
- break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
- },
- Err(ChannelError::Close(msg)) => {
- log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ Err(e) => {
+ let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+ if drop {
+ $entry.remove_entry();
}
- break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
- },
- Err(ChannelError::CloseDelayBroadcast(_)) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
+ break Err(res);
+ }
}
}
}
($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
match $res {
Ok(res) => res,
- Err(ChannelError::Ignore(msg)) => {
- return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
- },
- Err(ChannelError::Close(msg)) => {
- log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
- }
- return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
- },
- Err(ChannelError::CloseDelayBroadcast(msg)) => {
- log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ Err(e) => {
+ let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+ if drop {
+ $entry.remove_entry();
}
- let shutdown_res = chan.force_shutdown(false);
- return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+ return Err(res);
+ }
+ }
+ }
+}
+
+macro_rules! remove_channel {
+ ($channel_state: expr, $entry: expr) => {
+ {
+ let channel = $entry.remove_entry().1;
+ if let Some(short_id) = channel.get_short_channel_id() {
+ $channel_state.short_to_id.remove(&short_id);
}
+ channel
}
}
}
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => {
match $err {
ChannelMonitorUpdateErr::PermanentFailure => {
- log_error!($self.logger, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+ if let Some(short_id) = $chan.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
}
// TODO: $failed_fails is dropped here, which will cause other channels to hit the
// chain in a confused state! We need to move them into the ChannelMonitor which
// splitting hairs we'd prefer to claim payments that were to us, but we haven't
// given up the preimage yet, so might as well just wait until the payment is
// retried, avoiding the on-chain fees.
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
- res
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id,
+ $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok()));
+ (res, true)
},
ChannelMonitorUpdateErr::TemporaryFailure => {
log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
- log_bytes!($entry.key()[..]),
+ log_bytes!($chan_id[..]),
if $resend_commitment && $resend_raa {
match $action_type {
RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
if !$resend_raa {
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
}
- $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
- Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
+ $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+ (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
},
}
- }
+ };
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { {
+ let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key());
+ if drop {
+ $entry.remove_entry();
+ }
+ res
+ } };
}
macro_rules! return_monitor_err {
}
}
+macro_rules! handle_chan_restoration_locked {
+ ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
+ $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+ $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr) => { {
+ let mut htlc_forwards = None;
+ let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
+
+ let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
+ let chanmon_update_is_none = chanmon_update.is_none();
+ let res = loop {
+ let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
+ if !forwards.is_empty() {
+ htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+ $channel_entry.get().get_funding_txo().unwrap(), forwards));
+ }
+
+ if chanmon_update.is_some() {
+ // On reconnect, we, by definition, only resend a funding_locked if there have been
+ // no commitment updates, so the only channel monitor update which could also be
+ // associated with a funding_locked would be the funding_created/funding_signed
+ // monitor update. That monitor update failing implies that we won't send
+ // funding_locked until it's been updated, so we can't have a funding_locked and a
+ // monitor update here (so we don't bother to handle it correctly below).
+ assert!($funding_locked.is_none());
+ // A channel monitor update makes no sense without either a funding_locked or a
+ // commitment update to process after it. Since we can't have a funding_locked, we
+ // only bother to handle the monitor-update + commitment_update case below.
+ assert!($commitment_update.is_some());
+ }
+
+ if let Some(msg) = $funding_locked {
+ // Similar to the above, this implies that we're letting the funding_locked fly
+ // before it should be allowed to.
+ assert!(chanmon_update.is_none());
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+ node_id: counterparty_node_id,
+ msg,
+ });
+ if let Some(announcement_sigs) = $self.get_announcement_sigs($channel_entry.get()) {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: counterparty_node_id,
+ msg: announcement_sigs,
+ });
+ }
+ $channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), $channel_entry.get().channel_id());
+ }
+
+ let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
+ if let Some(monitor_update) = chanmon_update {
+ // We only ever broadcast a funding transaction in response to a funding_signed
+ // message and the resulting monitor update. Thus, on channel_reestablish
+ // message handling we can't have a funding transaction to broadcast. When
+ // processing a monitor update which finishes and results in a funding
+ // broadcast, we cannot have a second monitor update; thus this case would
+ // indicate a bug.
+ assert!(funding_broadcastable.is_none());
+ // Given we were just reconnected or finished updating a channel monitor, the
+ // only case where we can get a new ChannelMonitorUpdate would be if we also
+ // have some commitment updates to send as well.
+ assert!($commitment_update.is_some());
+ if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ // channel_reestablish doesn't guarantee that the order of the messages it
+ // returns is sensible, but if we're setting what messages to re-transmit
+ // on monitor update success, we need to make sure it is sane.
+ let mut order = $order;
+ if $raa.is_none() {
+ order = RAACommitmentOrder::CommitmentFirst;
+ }
+ break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
+ }
+ }
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = $commitment_update {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: counterparty_node_id,
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = $raa {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: counterparty_node_id,
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match $order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+ if let Some(tx) = funding_broadcastable {
+ log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+ $self.tx_broadcaster.broadcast_transaction(&tx);
+ }
+ break Ok(());
+ };
+
+ if chanmon_update_is_none {
+ // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
+ // above. Doing so would imply calling handle_error!() from channel_monitor_updated() which
+ // should *never* end up calling back to `chain_monitor.update_channel()`.
+ assert!(res.is_ok());
+ }
+
+ (htlc_forwards, res, counterparty_node_id)
+ } }
+}
+
+macro_rules! post_handle_chan_restoration {
+ ($self: ident, $locked_res: expr) => { {
+ let (htlc_forwards, res, counterparty_node_id) = $locked_res;
+
+ let _ = handle_error!($self, res, counterparty_node_id);
+
+ if let Some(forwards) = htlc_forwards {
+ $self.forward_htlcs(&mut [forwards][..]);
+ }
+ } }
+}
+
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
pending_msg_events: Vec::new(),
}),
pending_inbound_payments: Mutex::new(HashMap::new()),
- pending_outbound_payments: Mutex::new(HashSet::new()),
+ pending_outbound_payments: Mutex::new(HashMap::new()),
our_network_key: keys_manager.get_node_secret(),
our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
///
/// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
/// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
+ ///
+ /// Note that we do not check if you are currently connected to the given peer. If no
+ /// connection is available, the outbound `open_channel` message may fail to send, resulting in
+ /// the channel eventually being silently forgotten.
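+ ///
+ /// A hedged usage sketch (`manager` is assumed to be a configured `ChannelManager`
+ /// and `their_node_id` a currently-connected peer):
+ ///
+ /// ```ignore
+ /// // 100_000 sat channel, nothing pushed to the peer, user_id 42, default config:
+ /// manager.create_channel(their_node_id, 100_000, 0, 42, None)?;
+ /// ```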
pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> {
if channel_value_satoshis < 1000 {
return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
}
- let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
- let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
+ let channel = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ match per_peer_state.get(&their_network_key) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ let config = override_config.as_ref().unwrap_or(&self.default_configuration);
+ Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
+ }
+ };
let res = channel.get_open_channel(self.genesis_hash.clone());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
res.reserve(channel_state.by_id.len());
for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
+ let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
+ channel.get_holder_counterparty_selected_channel_reserve_satoshis();
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
+ counterparty: ChannelCounterparty {
+ node_id: channel.get_counterparty_node_id(),
+ features: InitFeatures::empty(),
+ unspendable_punishment_reserve: to_remote_reserve_satoshis,
+ forwarding_info: channel.counterparty_forwarding_info(),
+ },
funding_txo: channel.get_funding_txo(),
short_channel_id: channel.get_short_channel_id(),
- remote_network_id: channel.get_counterparty_node_id(),
- counterparty_features: InitFeatures::empty(),
channel_value_satoshis: channel.get_value_satoshis(),
+ unspendable_punishment_reserve: to_self_reserve_satoshis,
inbound_capacity_msat,
outbound_capacity_msat,
user_id: channel.get_user_id(),
+ confirmations_required: channel.minimum_depth(),
+ force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
is_outbound: channel.is_outbound(),
is_funding_locked: channel.is_usable(),
is_usable: channel.is_live(),
is_public: channel.should_announce(),
- counterparty_forwarding_info: channel.counterparty_forwarding_info(),
});
}
}
let per_peer_state = self.per_peer_state.read().unwrap();
for chan in res.iter_mut() {
- if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) {
- chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone();
+ if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) {
+ chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone();
}
}
res
self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
}
- /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
- /// will be accepted on the given channel, and after additional timeout/the closing of all
- /// pending HTLCs, the channel will be closed on chain.
- ///
- /// May generate a SendShutdown message event on success, which should be relayed.
- pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ /// Helper function that issues the channel close events
+ fn issue_channel_close_events(&self, channel: &Channel<Signer>, closure_reason: ClosureReason) {
+ let mut pending_events_lock = self.pending_events.lock().unwrap();
+ if let Some(transaction) = channel.unbroadcasted_funding() {
+ pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
+ }
+ pending_events_lock.push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: closure_reason });
+ }
+
+ fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let (mut failed_htlcs, chan_option) = {
+ let counterparty_node_id;
+ let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
+ counterparty_node_id = chan_entry.get().get_counterparty_node_id();
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ };
+ failed_htlcs = htlcs;
+
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update {
+ if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ let (result, is_permanent) =
+ handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+ if is_permanent {
+ remove_channel!(channel_state, chan_entry);
+ break result;
+ }
+ }
+ }
+
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: chan_entry.get().get_counterparty_node_id(),
+ node_id: counterparty_node_id,
msg: shutdown_msg
});
+
if chan_entry.get().is_shutdown() {
- if let Some(short_id) = chan_entry.get().get_short_channel_id() {
- channel_state.short_to_id.remove(&short_id);
+ let channel = remove_channel!(channel_state, chan_entry);
+ if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: channel_update
+ });
}
- (failed_htlcs, Some(chan_entry.remove_entry().1))
- } else { (failed_htlcs, None) }
+ self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+ }
+ break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
}
};
+
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- let chan_update = if let Some(chan) = chan_option {
- if let Ok(update) = self.get_channel_update(&chan) {
- Some(update)
- } else { None }
- } else { None };
-
- if let Some(update) = chan_update {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
+ let _ = handle_error!(self, result, counterparty_node_id);
Ok(())
}
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// * If we are the channel initiator, we will pay between our [`Background`] fee
+ /// estimate and [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our
+ /// [`Normal`] fee estimate.
+ /// * If our counterparty is the channel initiator, we will require a channel closing
+ /// transaction feerate of at least our [`Background`] feerate or the feerate which
+ /// would appear on a force-closure transaction, whichever is lower. We will allow our
+ /// counterparty to pay as much fee as they'd like, however.
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ ///
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+ /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
+ pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, None)
+ }
+
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// `target_feerate_sats_per_1000_weight` has different meanings depending on whether we
+ /// initiated the channel being closed or not:
+ /// * If we are the channel initiator, we will pay at least this feerate on the closing
+ /// transaction. The upper-bound is set by
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
+ /// estimate (or `target_feerate_sats_per_1000_weight`, if it is greater).
+ /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure
+ /// transaction feerate below `target_feerate_sats_per_1000_weight` (or the feerate which
+ /// will appear on a force-closure transaction, whichever is lower).
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ ///
+ /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
+ /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
+ /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
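+ ///
+ /// A hedged usage sketch (`manager` and `chan_id` assumed as elsewhere):
+ ///
+ /// ```ignore
+ /// // Default-feerate cooperative close:
+ /// manager.close_channel(&chan_id)?;
+ /// // ...or require at least 253 sats per 1000 weight on the closing transaction:
+ /// manager.close_channel_with_target_feerate(&chan_id, 253)?;
+ /// ```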
+ pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight))
+ }
+
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
let (monitor_update_option, mut failed_htlcs) = shutdown_res;
- log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
+ log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
}
}
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
+ /// `peer_node_id` and `peer_msg` should be set when we receive a message from a peer, but not
+ /// when the user force-closes; any `peer_msg` is re-exposed in the `ChannelClosed` event reason.
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let Some(short_id) = chan.get().get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
+ if peer_node_id.is_some() {
+ if let Some(peer_msg) = peer_msg {
+ self.issue_channel_close_events(chan.get(), ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
+ }
+ } else {
+ self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
+ }
chan.remove_entry().1
} else {
return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
};
- log_trace!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
+ log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
self.finish_force_close_channel(chan.force_shutdown(true));
- if let Ok(update) = self.get_channel_update(&chan) {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- match self.force_close_channel_with_peer(channel_id, None) {
+ match self.force_close_channel_with_peer(channel_id, None, None) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) };
- let (next_hop_data, next_hop_hmac) = {
- match msgs::OnionHopData::read(&mut chacha_stream) {
+ let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = {
+ match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) {
Err(err) => {
let error_code = match err {
msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
};
let pending_forward_info = if next_hop_hmac == [0; 32] {
- #[cfg(test)]
- {
- // In tests, make sure that the initial onion pcket data is, at least, non-0.
- // We could do some fancy randomness test here, but, ehh, whatever.
- // This checks for the issue where you can calculate the path length given the
- // onion data as all the path entries that the originator sent will be here
- // as-is (and were originally 0s).
- // Of course reverse path calculation is still pretty easy given naive routing
- // algorithms, but this fixes the most-obvious case.
- let mut next_bytes = [0; 32];
- chacha_stream.read_exact(&mut next_bytes).unwrap();
- assert_ne!(next_bytes[..], [0; 32][..]);
- chacha_stream.read_exact(&mut next_bytes).unwrap();
- assert_ne!(next_bytes[..], [0; 32][..]);
- }
-
- // OUR PAYMENT!
- // final_expiry_too_soon
- // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least
- // HTLC_FAIL_BACK_BUFFER blocks to go.
- // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
- // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational).
- if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
- return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
- }
- // final_incorrect_htlc_amount
- if next_hop_data.amt_to_forward > msg.amount_msat {
- return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
- }
- // final_incorrect_cltv_expiry
- if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
- return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
- }
+ #[cfg(test)]
+ {
+ // In tests, make sure that the initial onion packet data is, at least, non-0.
+ // We could do some fancy randomness test here, but, ehh, whatever.
+ // This checks for the issue where you can calculate the path length given the
+ // onion data as all the path entries that the originator sent will be here
+ // as-is (and were originally 0s).
+ // Of course reverse path calculation is still pretty easy given naive routing
+ // algorithms, but this fixes the most-obvious case.
+ let mut next_bytes = [0; 32];
+ chacha_stream.read_exact(&mut next_bytes).unwrap();
+ assert_ne!(next_bytes[..], [0; 32][..]);
+ chacha_stream.read_exact(&mut next_bytes).unwrap();
+ assert_ne!(next_bytes[..], [0; 32][..]);
+ }
- let payment_data = match next_hop_data.format {
- msgs::OnionHopDataFormat::Legacy { .. } => None,
- msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
- msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
- };
+ // OUR PAYMENT!
+ // final_expiry_too_soon
+ // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
+ // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
+ // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
+ // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
+ // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
+ if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+ return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
+ }
+ // final_incorrect_htlc_amount
+ if next_hop_data.amt_to_forward > msg.amount_msat {
+ return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
+ }
+ // final_incorrect_cltv_expiry
+ if next_hop_data.outgoing_cltv_value != msg.cltv_expiry {
+ return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
+ }
- if payment_data.is_none() {
- return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
- }
+ let routing = match next_hop_data.format {
+ msgs::OnionHopDataFormat::Legacy { .. } => return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]),
+ msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]),
+ msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
+ if payment_data.is_some() && keysend_preimage.is_some() {
+ return_err!("We don't support MPP keysend payments", 0x4000|22, &[0;0]);
+ } else if let Some(data) = payment_data {
+ PendingHTLCRouting::Receive {
+ payment_data: data,
+ incoming_cltv_expiry: msg.cltv_expiry,
+ }
+ } else if let Some(payment_preimage) = keysend_preimage {
+ // We need to check that the sender knows the keysend preimage before processing this
+ // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
+ // could discover the final destination of X, by probing the adjacent nodes on the route
+ // with a keysend payment of identical payment hash to X and observing the processing
+ // time discrepancies due to a hash collision with X.
+ let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+ if hashed_preimage != msg.payment_hash {
+ return_err!("Payment preimage didn't match payment hash", 0x4000|22, &[0;0]);
+ }
- // Note that we could obviously respond immediately with an update_fulfill_htlc
- // message, however that would leak that we are the recipient of this payment, so
- // instead we stay symmetric with the forwarding case, only responding (after a
- // delay) once they've send us a commitment_signed!
+ PendingHTLCRouting::ReceiveKeysend {
+ payment_preimage,
+ incoming_cltv_expiry: msg.cltv_expiry,
+ }
+ } else {
+ return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
+ }
+ },
+ };
- PendingHTLCStatus::Forward(PendingHTLCInfo {
- routing: PendingHTLCRouting::Receive {
- payment_data: payment_data.unwrap(),
- incoming_cltv_expiry: msg.cltv_expiry,
- },
- payment_hash: msg.payment_hash.clone(),
- incoming_shared_secret: shared_secret,
- amt_to_forward: next_hop_data.amt_to_forward,
- outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
- })
- } else {
- let mut new_packet_data = [0; 20*65];
- let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
- #[cfg(debug_assertions)]
- {
- // Check two things:
- // a) that the behavior of our stream here will return Ok(0) even if the TLV
- // read above emptied out our buffer and the unwrap() wont needlessly panic
- // b) that we didn't somehow magically end up with extra data.
- let mut t = [0; 1];
- debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
- }
- // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
- // fill the onion hop data we'll forward to our next-hop peer.
- chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
+ // Note that we could obviously respond immediately with an update_fulfill_htlc
+ // message, however that would leak that we are the recipient of this payment, so
+ // instead we stay symmetric with the forwarding case, only responding (after a
+ // delay) once they've sent us a commitment_signed!
+
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ routing,
+ payment_hash: msg.payment_hash.clone(),
+ incoming_shared_secret: shared_secret,
+ amt_to_forward: next_hop_data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+ })
+ } else {
+ let mut new_packet_data = [0; 20*65];
+ let read_pos = chacha_stream.read(&mut new_packet_data).unwrap();
+ #[cfg(debug_assertions)]
+ {
+ // Check two things:
+ // a) that the behavior of our stream here will return Ok(0) even if the TLV
+ // read above emptied out our buffer and the unwrap() won't needlessly panic
+ // b) that we didn't somehow magically end up with extra data.
+ let mut t = [0; 1];
+ debug_assert!(chacha_stream.read(&mut t).unwrap() == 0);
+ }
+ // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we
+ // fill the onion hop data we'll forward to our next-hop peer.
+ chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]);
- let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+ let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
- let blinding_factor = {
- let mut sha = Sha256::engine();
- sha.input(&new_pubkey.serialize()[..]);
- sha.input(&shared_secret);
- Sha256::from_engine(sha).into_inner()
- };
+ let blinding_factor = {
+ let mut sha = Sha256::engine();
+ sha.input(&new_pubkey.serialize()[..]);
+ sha.input(&shared_secret);
+ Sha256::from_engine(sha).into_inner()
+ };
- let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
- Err(e)
- } else { Ok(new_pubkey) };
+ let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
+ Err(e)
+ } else { Ok(new_pubkey) };
- let outgoing_packet = msgs::OnionPacket {
- version: 0,
- public_key,
- hop_data: new_packet_data,
- hmac: next_hop_hmac.clone(),
- };
+ let outgoing_packet = msgs::OnionPacket {
+ version: 0,
+ public_key,
+ hop_data: new_packet_data,
+ hmac: next_hop_hmac.clone(),
+ };
- let short_channel_id = match next_hop_data.format {
- msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
- msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
- msgs::OnionHopDataFormat::FinalNode { .. } => {
- return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
- },
- };
-
- PendingHTLCStatus::Forward(PendingHTLCInfo {
- routing: PendingHTLCRouting::Forward {
- onion_packet: outgoing_packet,
- short_channel_id,
- },
- payment_hash: msg.payment_hash.clone(),
- incoming_shared_secret: shared_secret,
- amt_to_forward: next_hop_data.amt_to_forward,
- outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
- })
+ let short_channel_id = match next_hop_data.format {
+ msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
+ msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
+ msgs::OnionHopDataFormat::FinalNode { .. } => {
+ return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
+ },
};
+ PendingHTLCStatus::Forward(PendingHTLCInfo {
+ routing: PendingHTLCRouting::Forward {
+ onion_packet: outgoing_packet,
+ short_channel_id,
+ },
+ payment_hash: msg.payment_hash.clone(),
+ incoming_shared_secret: shared_secret,
+ amt_to_forward: next_hop_data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
+ })
+ };
+
channel_state = Some(self.channel_state.lock().unwrap());
if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
// with a short_channel_id of 0. This is important as various things later assume
// short_channel_id is non-0 in any ::Forward.
if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
- let forwarding_id = match id_option {
- None => { // unknown_next_peer
- return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
- },
- Some(id) => id.clone(),
- };
if let Some((err, code, chan_update)) = loop {
+ let forwarding_id = match id_option {
+ None => { // unknown_next_peer
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ },
+ Some(id) => id.clone(),
+ };
+
let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+ if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+ // Note that the behavior here should be identical to the above block - we
+ // should NOT reveal the existence or non-existence of a private channel if
+ // we don't allow forwards outbound over them.
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ }
+
// Note that we could technically not return an error yet here and just hope
// that the connection is reestablished or monitor updated by the time we get
// around to doing the actual forward, but better to fail early if we can and
// hopefully an attacker trying to path-trace payments cannot make this occur
// on a small/per-node/per-channel scale.
if !chan.is_live() { // channel_disabled
- break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
- break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+ break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
- let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) });
+ let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64)
+ .and_then(|prop_fee| { (prop_fee / 1000000)
+ .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) });
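+ // For example (hypothetical parameters): with fee_proportional_millionths = 100 and
+ // a base fee of 1_000 msat, forwarding amt_to_forward = 1_000_000 msat requires
+ // 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat in fees, so msg.amount_msat
+ // must be at least 1_001_100 msat to pass the check below.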
if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
- break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+ break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
- break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
+ break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
let cur_height = self.best_block.read().unwrap().height() + 1;
- // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
- // packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
+ // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+ // but we want to be robust wrt to counterparty packet sanitization (see
+ // HTLC_FAIL_BACK_BUFFER rationale).
if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
- break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+ break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
break Some(("CLTV expiry is too far in the future", 21, None));
}
- // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
- // But, to be safe against policy reception, we use a longuer delay.
- if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
- break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+ // If the HTLC expires ~now, don't bother trying to forward it to our
+ // counterparty. They should fail it anyway, but we don't want to bother with
+ // the round-trips or risk them deciding they definitely want the HTLC and
+ // force-closing to ensure they get it if we're offline.
+ // We previously had a much more aggressive check here which tried to ensure
+ // our counterparty receives an HTLC which has *our* risk threshold met on it,
+ // but there is no need to do that, and since we're a bit conservative with our
+ // risk threshold it just results in failing to forward payments.
+ if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+ break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
}
break None;
(pending_forward_info, channel_state.unwrap())
}
- /// only fails if the channel does not yet have an assigned short_id
+ /// Gets the current channel_update for the given channel. This first checks if the channel is
+ /// public, and thus should be called whenever the result is going to be passed out in a
+ /// [`MessageSendEvent::BroadcastChannelUpdate`] event.
+ ///
+ /// May be called with channel_state already locked!
+ fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ if !chan.should_announce() {
+ return Err(LightningError {
+ err: "Cannot broadcast a channel_update for a private channel".to_owned(),
+ action: msgs::ErrorAction::IgnoreError
+ });
+ }
+ log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+ self.get_channel_update_for_unicast(chan)
+ }
+
+ /// Gets the current channel_update for the given channel. This does not check if the channel
+ /// is public (only returning an Err if the channel does not yet have an assigned short_id),
+ /// and thus MUST NOT be called unless the recipient of the resulting message has already
+ /// provided evidence that they know about the existence of the channel.
/// May be called with channel_state already locked!
- fn get_channel_update(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
let short_channel_id = match chan.get_short_channel_id() {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
cltv_expiry_delta: chan.get_cltv_expiry_delta(),
htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
- fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
+ fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
fee_proportional_millionths: chan.get_fee_proportional_millionths(),
excess_data: Vec::new(),
};
}
// Only public for testing, this should otherwise never be called directly
- pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
+ pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
let prng_seed = self.keys_manager.get_secure_random_bytes();
let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
- let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?;
+ let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
if onion_utils::route_size_insane(&onion_payloads) {
return Err(APIError::RouteError{err: "Route size too large considering onion data"});
}
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes));
let err: Result<(), _> = loop {
let mut channel_lock = self.channel_state.lock().unwrap();
if !chan.get().is_live() {
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
}
- break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- path: path.clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- }, onion_packet, &self.logger), channel_state, chan)
+ let send_res = break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+ htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ payment_id,
+ }, onion_packet, &self.logger),
+ channel_state, chan);
+
+ let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+ let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable {
+ session_privs: HashSet::new(),
+ pending_amt_msat: 0,
+ payment_hash: *payment_hash,
+ payment_secret: *payment_secret,
+ starting_block_height: self.best_block.read().unwrap().height(),
+ total_msat: total_value,
+ });
+ assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat));
+
+ send_res
} {
Some((update_add, commitment_signed, monitor_update)) => {
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
return Err(APIError::MonitorUpdateFailed);
}
+ log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: path.first().unwrap().pubkey,
updates: msgs::CommitmentUpdate {
/// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
/// bit set (either as required or as available). If multiple paths are present in the Route,
/// we assume the invoice had the basic_mpp feature set.
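+ ///
+ /// A minimal usage sketch (assuming `route`, `payment_hash` and `payment_secret` came
+ /// from a decoded invoice; error handling elided):
+ ///
+ /// ```ignore
+ /// let payment_id = channel_manager.send_payment(&route, payment_hash, &Some(payment_secret))?;
+ /// ```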
- pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
+ pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<PaymentId, PaymentSendFailure> {
+ self.send_payment_internal(route, payment_hash, payment_secret, None, None, None)
+ }
+
+ fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: Option<PaymentId>, recv_value_msat: Option<u64>) -> Result<PaymentId, PaymentSendFailure> {
if route.paths.len() < 1 {
return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
}
// for now more than 10 paths likely carries too much one-path failure.
return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
}
+ if payment_secret.is_none() && route.paths.len() > 1 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
+ }
let mut total_value = 0;
let our_node_id = self.get_our_node_id();
let mut path_errs = Vec::with_capacity(route.paths.len());
+ let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) };
'path_check: for path in route.paths.iter() {
if path.len() < 1 || path.len() > 20 {
path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
if path_errs.iter().any(|e| e.is_err()) {
return Err(PaymentSendFailure::PathParameterError(path_errs));
}
+ if let Some(amt_msat) = recv_value_msat {
+ debug_assert!(amt_msat >= total_value);
+ total_value = amt_msat;
+ }
let cur_height = self.best_block.read().unwrap().height() + 1;
let mut results = Vec::new();
for path in route.paths.iter() {
- results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
+ results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage));
}
let mut has_ok = false;
let mut has_err = false;
} else if has_err {
Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
} else {
- Ok(())
+ Ok(payment_id)
+ }
+ }
+
+ /// Retries a payment along the given [`Route`].
+ ///
+ /// Errors returned are a superset of those returned from [`send_payment`], so see
+ /// [`send_payment`] documentation for more details on errors. This method will also error if the
+ /// retry amount puts the payment more than 10% over the payment's total amount, or if the payment
+ /// for the given `payment_id` cannot be found (likely due to timeout or success).
+ ///
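+ /// A minimal sketch (assuming a fresh `route` was fetched for the retry and `payment_id`
+ /// was returned by the original [`send_payment`] call):
+ ///
+ /// ```ignore
+ /// if let Err(e) = channel_manager.retry_payment(&route, payment_id) {
+ ///     // Retrying can fail, e.g. if the payment already succeeded or timed out.
+ /// }
+ /// ```
+ ///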
+ /// [`send_payment`]: Self::send_payment
+ pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
+ const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
+ for path in route.paths.iter() {
+ if path.len() == 0 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "length-0 path in route".to_string()
+ }))
+ }
+ }
+
+ let (total_msat, payment_hash, payment_secret) = {
+ let outbounds = self.pending_outbound_payments.lock().unwrap();
+ if let Some(payment) = outbounds.get(&payment_id) {
+ match payment {
+ PendingOutboundPayment::Retryable {
+ total_msat, payment_hash, payment_secret, pending_amt_msat, ..
+ } => {
+ let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
+ if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
+ }))
+ }
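+ // E.g. (hypothetical amounts): with total_msat = 100_000 and pending_amt_msat = 95_000,
+ // the check above rejects a 20_000 msat retry, since 115_000 > 100_000 * 110 / 100.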
+ (*total_msat, *payment_hash, *payment_secret)
+ },
+ PendingOutboundPayment::Legacy { .. } => {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
+ }))
+ }
+ }
+ } else {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
+ }))
+ }
+ };
+ return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ())
+ }
+
+ /// Send a spontaneous payment, which is a payment that does not require the recipient to have
+ /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
+ /// the preimage, it must be a cryptographically secure random value that no intermediate node
+ /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
+ /// never reach the recipient.
+ ///
+ /// See [`send_payment`] documentation for more details on the return value of this function.
+ ///
+ /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
+ /// [`send_payment`] for more information about the risks of duplicate preimage usage.
+ ///
+ /// Note that `route` must have exactly one path.
+ ///
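+ /// A minimal sketch (passing `None` lets LDK generate a secure random preimage):
+ ///
+ /// ```ignore
+ /// let (payment_hash, payment_id) = channel_manager.send_spontaneous_payment(&route, None)?;
+ /// ```
+ ///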
+ /// [`send_payment`]: Self::send_payment
+ pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+ let preimage = match payment_preimage {
+ Some(p) => p,
+ None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
+ };
+ let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
+ match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) {
+ Ok(payment_id) => Ok((payment_hash, payment_id)),
+ Err(e) => Err(e)
}
}
/// Note that this includes RBF or similar transaction replacement strategies - lightning does
/// not currently support replacing a funding transaction on an existing channel. Instead,
/// create a new channel with a conflicting funding transaction.
+ ///
+ /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
- const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+ const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
// smaller than 500:
const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;
- /// Generates a signed node_announcement from the given arguments and creates a
- /// BroadcastNodeAnnouncement event. Note that such messages will be ignored unless peers have
- /// seen a channel_announcement from us (ie unless we have public channels open).
+ /// Regenerates channel_announcements and generates a signed node_announcement from the given
+ /// arguments, providing them in corresponding events via
+ /// [`get_and_clear_pending_msg_events`], if at least one public channel has been confirmed
+ /// on-chain. This effectively re-broadcasts all channel announcements and sends our node
+ /// announcement to ensure that the lightning P2P network is aware of the channels we have and
+ /// our network addresses.
+ ///
+ /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this
+ /// node to humans. They carry no in-protocol meaning.
///
- /// RGB is a node "color" and alias is a printable human-readable string to describe this node
- /// to humans. They carry no in-protocol meaning.
+ /// `addresses` represent the set (possibly empty) of socket addresses on which this node
+ /// accepts incoming connections. These will be included in the node_announcement, publicly
+ /// tying these addresses together and to this node. If you wish to preserve user privacy,
+ /// addresses should likely contain only Tor Onion addresses.
///
- /// addresses represent the set (possibly empty) of socket addresses on which this node accepts
- /// incoming connections. These will be broadcast to the network, publicly tying these
- /// addresses together. If you wish to preserve user privacy, addresses should likely contain
- /// only Tor Onion addresses.
+ /// Panics if `addresses` is absurdly large (more than 500).
///
- /// Panics if addresses is absurdly large (more than 500).
+ /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
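+ ///
+ /// A minimal sketch (all-zero color and alias, no addresses; values are illustrative):
+ ///
+ /// ```ignore
+ /// channel_manager.broadcast_node_announcement([0; 3], [0; 32], Vec::new());
+ /// ```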
pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
excess_data: Vec::new(),
};
let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
+ let node_announce_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
- msg: msgs::NodeAnnouncement {
- signature: self.secp_ctx.sign(&msghash, &self.our_network_key),
- contents: announcement
- },
- });
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+
+ let mut announced_chans = false;
+ for (_, chan) in channel_state.by_id.iter() {
+ if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+ msg,
+ update_msg: match self.get_channel_update_for_broadcast(chan) {
+ Ok(msg) => msg,
+ Err(_) => continue,
+ },
+ });
+ announced_chans = true;
+ } else {
+ // If the channel is not public or has not yet reached funding_locked, check the
+ // next channel. If we don't yet have any public channels, we'll skip the broadcast
+ // below as peers may not accept it without channels on chain first.
+ }
+ }
+
+ if announced_chans {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
+ msg: msgs::NodeAnnouncement {
+ signature: node_announce_sig,
+ contents: announcement
+ },
+ });
+ }
}
/// Processes HTLCs which are pending waiting on random forward delay.
onion_packet, ..
}, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
prev_funding_outpoint } => {
- log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id);
+ log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
outpoint: prev_funding_outpoint,
} else {
panic!("Stated return value requirements in send_htlc() were not met");
}
- let chan_update = self.get_channel_update(chan.get()).unwrap();
+ let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap();
failed_forwards.push((htlc_source, payment_hash,
HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() }
));
panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
- log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
- match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+ log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
Err(e) => {
if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+ log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
} else {
panic!("Stated return value requirements in get_update_fail_htlc() were not met");
}
// close channel and then send error message to peer.
let counterparty_node_id = chan.get().get_counterparty_node_id();
let err: Result<(), _> = match e {
- ChannelError::Ignore(_) => {
+ ChannelError::Ignore(_) | ChannelError::Warn(_) => {
panic!("Stated return value requirements in send_commitment() were not met");
- },
+ }
ChannelError::Close(msg) => {
log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
let (channel_id, mut channel) = chan.remove_entry();
if let Some(short_id) = channel.get_short_channel_id() {
channel_state.short_to_id.remove(&short_id);
}
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok()))
+ // ChannelClosed event is generated by handle_error for us.
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
},
ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
};
handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
continue;
}
+ log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
+ add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: chan.get().get_counterparty_node_id(),
updates: msgs::CommitmentUpdate {
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
- incoming_shared_secret, payment_hash, amt_to_forward, .. },
+ routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
prev_funding_outpoint } => {
+ let (cltv_expiry, onion_payload) = match routing {
+ PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry } =>
+ (incoming_cltv_expiry, OnionPayload::Invoice(payment_data)),
+ PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
+ (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage)),
+ _ => {
+ panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
+ }
+ };
let claimable_htlc = ClaimableHTLC {
prev_hop: HTLCPreviousHopData {
short_channel_id: prev_short_channel_id,
incoming_packet_shared_secret: incoming_shared_secret,
},
value: amt_to_forward,
- payment_data: payment_data.clone(),
- cltv_expiry: incoming_cltv_expiry,
+ cltv_expiry,
+ onion_payload,
};
macro_rules! fail_htlc {
let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
match payment_secrets.entry(payment_hash) {
hash_map::Entry::Vacant(_) => {
- log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ match claimable_htlc.onion_payload {
+ OnionPayload::Invoice(_) => {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ },
+ OnionPayload::Spontaneous(preimage) => {
+ match channel_state.claimable_htlcs.entry(payment_hash) {
+ hash_map::Entry::Vacant(e) => {
+ e.insert(vec![claimable_htlc]);
+ new_events.push(events::Event::PaymentReceived {
+ payment_hash,
+ amt: amt_to_forward,
+ purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+ });
+ },
+ hash_map::Entry::Occupied(_) => {
+ log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ }
+ }
+ }
+ }
},
hash_map::Entry::Occupied(inbound_payment) => {
+ let payment_data =
+ if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
+ data.clone()
+ } else {
+ log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ continue
+ };
if inbound_payment.get().payment_secret != payment_data.payment_secret {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc);
let mut total_value = 0;
let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
.or_insert(Vec::new());
+ if htlcs.len() == 1 {
+ if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ continue
+ }
+ }
htlcs.push(claimable_htlc);
for htlc in htlcs.iter() {
total_value += htlc.value;
- if htlc.payment_data.total_msat != payment_data.total_msat {
- log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
- log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat);
- total_value = msgs::MAX_VALUE_MSAT;
+ match &htlc.onion_payload {
+ OnionPayload::Invoice(htlc_payment_data) => {
+ if htlc_payment_data.total_msat != payment_data.total_msat {
+ log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+ log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat);
+ total_value = msgs::MAX_VALUE_MSAT;
+ }
+ if total_value >= msgs::MAX_VALUE_MSAT { break; }
+ },
+ _ => unreachable!(),
}
- if total_value >= msgs::MAX_VALUE_MSAT { break; }
}
if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat {
log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
} else if total_value == payment_data.total_msat {
new_events.push(events::Event::PaymentReceived {
payment_hash,
- payment_preimage: inbound_payment.get().payment_preimage,
- payment_secret: payment_data.payment_secret,
+ purpose: events::PaymentPurpose::InvoicePayment {
+ payment_preimage: inbound_payment.get().payment_preimage,
+ payment_secret: payment_data.payment_secret,
+ user_payment_id: inbound_payment.get().user_payment_id,
+ },
amt: total_value,
- user_payment_id: inbound_payment.get().user_payment_id,
});
// Only ever generate at most one PaymentReceived
// per registered payment_hash, even if it isn't
},
};
},
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
- },
HTLCForwardInfo::FailHTLC { .. } => {
panic!("Got pending fail of our own HTLC");
}
}
#[cfg(any(test, feature = "_test_utils"))]
- pub(crate) fn test_process_background_events(&self) {
+ /// Process background events, for functional testing
+ pub fn test_process_background_events(&self) {
self.process_background_events();
}
- /// If a peer is disconnected we mark any channels with that peer as 'disabled'.
- /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
- /// to inform the network about the uselessness of these channels.
+ fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+ if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
+ // If the feerate has decreased by less than half, don't bother
+ if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
+ log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ return (true, NotifyOption::SkipPersist, Ok(()));
+ }
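+ // E.g. (hypothetical rates): at a current feerate of 1_000 sat per 1000-weight, a new
+ // estimate of 600 is skipped by the check above (600 * 2 = 1_200 > 1_000), while 450
+ // qualifies for an update (450 * 2 = 900 <= 1_000), as would any increase above 1_000.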
+ if !chan.is_live() {
+ log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+ return (true, NotifyOption::SkipPersist, Ok(()));
+ }
+ log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
+ log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
+
+ let mut retain_channel = true;
+ let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
+ Ok(res) => Ok(res),
+ Err(e) => {
+ let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ if drop { retain_channel = false; }
+ Err(res)
+ }
+ };
+ let ret_err = match res {
+ Ok(Some((update_fee, commitment_signed, monitor_update))) => {
+ if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id);
+ if drop { retain_channel = false; }
+ res
+ } else {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: Some(update_fee),
+ commitment_signed,
+ },
+ });
+ Ok(())
+ }
+ },
+ Ok(None) => Ok(()),
+ Err(e) => Err(e),
+ };
+ (retain_channel, NotifyOption::DoPersist, ret_err)
+ }
+
+ #[cfg(fuzzing)]
+ /// In chanmon_consistency we want to sometimes do the channel fee updates done in
+ /// timer_tick_occurred, but we can't generate the disabled channel updates, as the fuzzer
+ /// considers those a fuzz failure (they usually indicate a channel force-close, which is
+ /// exactly what it wants to detect). Thus, we expose this variant here for its benefit.
+ pub fn maybe_update_chan_fees(&self) {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut should_persist = NotifyOption::SkipPersist;
+
+ let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+ let mut handle_errors = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ let short_to_id = &mut channel_state.short_to_id;
+ channel_state.by_id.retain(|chan_id, chan| {
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+ if err.is_err() {
+ handle_errors.push(err);
+ }
+ retain_channel
+ });
+ }
+
+ should_persist
+ });
+ }
+
+ /// Performs actions which should happen on startup and roughly once per minute thereafter.
///
- /// This method handles all the details, and must be called roughly once per minute.
+ /// This currently includes:
+ /// * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
+ /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
+ /// than a minute, informing the network that they should no longer attempt to route over
+ /// the channel.
///
- /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
+ /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
+ /// estimate fetches.
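+ ///
+ /// A minimal sketch of driving this from a background thread (assuming `channel_manager`
+ /// is shared via an `Arc`):
+ ///
+ /// ```ignore
+ /// loop {
+ ///     std::thread::sleep(std::time::Duration::from_secs(60));
+ ///     channel_manager.timer_tick_occurred();
+ /// }
+ /// ```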
pub fn timer_tick_occurred(&self) {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = NotifyOption::SkipPersist;
if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- for (_, chan) in channel_state.by_id.iter_mut() {
- match chan.channel_update_status() {
- ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
- ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
- ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
- ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
- ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
- if let Ok(update) = self.get_channel_update(&chan) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
- },
- ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
- if let Ok(update) = self.get_channel_update(&chan) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- should_persist = NotifyOption::DoPersist;
- chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
- },
- _ => {},
- }
+ let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+ let mut handle_errors = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ let short_to_id = &mut channel_state.short_to_id;
+ channel_state.by_id.retain(|chan_id, chan| {
+ let counterparty_node_id = chan.get_counterparty_node_id();
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+ if err.is_err() {
+ handle_errors.push((err, counterparty_node_id));
+ }
+ if !retain_channel { return false; }
+
+ if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+ let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ handle_errors.push((Err(err), chan.get_counterparty_node_id()));
+ if needs_close { return false; }
+ }
+
+ match chan.channel_update_status() {
+ ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
+ ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
+ ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+ ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+ ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+ },
+ ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+ },
+ _ => {},
+ }
+
+ true
+ });
}
+ for (err, counterparty_node_id) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
+ }
should_persist
});
}
let (failure_code, onion_failure_data) =
match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
hash_map::Entry::Occupied(chan_entry) => {
- if let Ok(upd) = self.get_channel_update(&chan_entry.get()) {
+ if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) {
(0x1000|7, upd.encode_with_len())
} else {
(0x4000|10, Vec::new())
self.fail_htlc_backwards_internal(channel_state,
htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
},
- HTLCSource::OutboundRoute { session_priv, .. } => {
- if {
- let mut session_priv_bytes = [0; 32];
- session_priv_bytes.copy_from_slice(&session_priv[..]);
- self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
- } {
- self.pending_events.lock().unwrap().push(
- events::Event::PaymentFailed {
- payment_hash,
- rejected_by_dest: false,
-#[cfg(test)]
- error_code: None,
-#[cfg(test)]
- error_data: None,
- }
- )
+ HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
+ if payment.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) {
+ self.pending_events.lock().unwrap().push(
+ events::Event::PaymentPathFailed {
+ payment_hash,
+ rejected_by_dest: false,
+ network_update: None,
+ all_paths_failed: payment.get().remaining_parts() == 0,
+ path: path.clone(),
+ short_channel_id: None,
+ #[cfg(test)]
+ error_code: None,
+ #[cfg(test)]
+ error_data: None,
+ }
+ );
+ }
} else {
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
}
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
- HTLCSource::OutboundRoute { ref path, session_priv, .. } => {
- if {
- let mut session_priv_bytes = [0; 32];
- session_priv_bytes.copy_from_slice(&session_priv[..]);
- !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
- } {
+ HTLCSource::OutboundRoute { ref path, session_priv, payment_id, .. } => {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ let mut all_paths_failed = false;
+ if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(payment_id) {
+ if !sessions.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) {
+ log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
+ return;
+ }
+ if sessions.get().remaining_parts() == 0 {
+ all_paths_failed = true;
+ }
+ } else {
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
return;
}
match &onion_error {
&HTLCFailReason::LightningError { ref err } => {
#[cfg(test)]
- let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
#[cfg(not(test))]
- let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
// TODO: If we decided to blame ourselves (or one of our channels) in
// process_onion_failure we should close that channel as it implies our
// next-hop is needlessly blaming us!
- if let Some(update) = channel_update {
- self.channel_state.lock().unwrap().pending_msg_events.push(
- events::MessageSendEvent::PaymentFailureNetworkUpdate {
- update,
- }
- );
- }
self.pending_events.lock().unwrap().push(
- events::Event::PaymentFailed {
+ events::Event::PaymentPathFailed {
payment_hash: payment_hash.clone(),
rejected_by_dest: !payment_retryable,
+ network_update,
+ all_paths_failed,
+ path: path.clone(),
+ short_channel_id,
#[cfg(test)]
error_code: onion_error_code,
#[cfg(test)]
ref data,
.. } => {
// we get a fail_malformed_htlc from the first hop
- // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary
+ // TODO: We'd like to generate a NetworkUpdate for temporary
// failures here, but that would be insufficient as get_route
// generally ignores its view of our own channels as we provide them via
// ChannelDetails.
// TODO: For non-temporary failures, we really should be closing the
// channel here as we apparently can't relay through them anyway.
self.pending_events.lock().unwrap().push(
- events::Event::PaymentFailed {
+ events::Event::PaymentPathFailed {
payment_hash: payment_hash.clone(),
rejected_by_dest: path.len() == 1,
+ network_update: None,
+ all_paths_failed,
+ path: path.clone(),
+ short_channel_id: Some(path.first().unwrap().short_channel_id),
#[cfg(test)]
error_code: Some(*failure_code),
#[cfg(test)]
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
} else {
match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
- Err(Some(e)) => {
- if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+ ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
+ if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
// HTLC when the monitor updating is restored (or on chain).
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+ log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
claimed_any_htlcs = true;
- } else { errs.push(e); }
+ } else { errs.push((pk, err)); }
},
- Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
- Ok(()) => claimed_any_htlcs = true,
+ ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+ ClaimFundsFromHop::DuplicateClaim => {
+ // While we shouldn't normally get here, if we do, it likely
+ // indicates that the HTLC was timed out some time ago and is no longer
+ // available to be claimed. Thus, it does not make sense to set
+ // `claimed_any_htlcs`.
+ },
+ ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
}
}
}
} else { false }
}
- fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+ fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
let channel_state = &mut **channel_state_lock;
let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
Some(chan_id) => chan_id.clone(),
None => {
- return Err(None)
+ return ClaimFundsFromHop::PrevHopForceClosed
}
};
if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
- let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
- Ok((msgs, monitor_option)) => {
- if let Some(monitor_update) = monitor_option {
+ Ok(msgs_monitor_option) => {
+ if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- if was_frozen_for_monitor {
- assert!(msgs.is_none());
- } else {
- return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
- }
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
+ "Failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, e);
+ return ClaimFundsFromHop::MonitorUpdateFail(
+ chan.get().get_counterparty_node_id(),
+ handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+ Some(htlc_value_msat)
+ );
}
+ if let Some((msg, commitment_signed)) = msgs {
+ log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+ log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: vec![msg],
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ }
+ });
+ }
+ return ClaimFundsFromHop::Success(htlc_value_msat);
+ } else {
+ return ClaimFundsFromHop::DuplicateClaim;
}
- if let Some((msg, commitment_signed)) = msgs {
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: vec![msg],
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- }
- });
- }
- return Ok(())
},
- Err(e) => {
- // TODO: Do something with e?
- // This should only occur if we are claiming an HTLC at the same time as the
- // HTLC is being failed (eg because a block is being connected and this caused
- // an HTLC to time out). This should, of course, only occur if the user is the
- // one doing the claiming (as it being a part of a peer claim would imply we're
- // about to lose funds) and only if the lock in claim_funds was dropped as a
- // previous HTLC was failed (thus not for an MPP payment).
- debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
- return Err(None)
+ Err((e, monitor_update)) => {
+ if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
+ "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+ payment_preimage, e);
+ }
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+ if drop {
+ chan.remove_entry();
+ }
+ return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
},
}
} else { unreachable!(); }
}
- fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+ fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
match source {
- HTLCSource::OutboundRoute { session_priv, .. } => {
+ HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
mem::drop(channel_state_lock);
- if {
- let mut session_priv_bytes = [0; 32];
- session_priv_bytes.copy_from_slice(&session_priv[..]);
- self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
- } {
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentSent {
- payment_preimage
- });
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ let found_payment = if let Some(mut sessions) = outbounds.remove(&payment_id) {
+ sessions.remove(&session_priv_bytes, path.last().unwrap().fee_msat)
+ } else { false };
+ if found_payment {
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+ self.pending_events.lock().unwrap().push(
+ events::Event::PaymentSent {
+ payment_preimage,
+ payment_hash,
+ }
+ );
} else {
log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
}
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
- if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
- Ok(()) => Ok(()),
- Err(None) => {
- let preimage_update = ChannelMonitorUpdate {
- update_id: CLOSED_CHANNEL_UPDATE_ID,
- updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage: payment_preimage.clone(),
- }],
- };
- // We update the ChannelMonitor on the backward link, after
- // receiving an offchain preimage event from the forward link (the
- // event being update_fulfill_htlc).
- if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
- log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
- }
- Ok(())
- },
- Err(Some(res)) => Err(res),
- } {
- mem::drop(channel_state_lock);
- let res: Result<(), _> = Err(err);
- let _ = handle_error!(self, res, counterparty_node_id);
+ let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+ let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
+ let htlc_claim_value_msat = match res {
+ ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
+ ClaimFundsFromHop::Success(amt) => Some(amt),
+ _ => None,
+ };
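+ // If the previous-hop channel has already been force-closed, hand the preimage
+ // directly to its ChannelMonitor (below) so the HTLC can still be claimed on-chain.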
+ if let ClaimFundsFromHop::PrevHopForceClosed = res {
+ let preimage_update = ChannelMonitorUpdate {
+ update_id: CLOSED_CHANNEL_UPDATE_ID,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage.clone(),
+ }],
+ };
+ // We update the ChannelMonitor on the backward link, after
+ // receiving an offchain preimage event from the forward link (the
+ // event being update_fulfill_htlc).
+ if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+ log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, e);
+ }
+ // Note that we do *not* set `claimed_htlc` to false here. In fact, this
+ // totally could be a duplicate claim, but we have no way of knowing
+ // without interrogating the `ChannelMonitor` we've provided the above
+ // update to. Instead, we simply document in `PaymentForwarded` that this
+ // can happen.
+ }
+ mem::drop(channel_state_lock);
+ if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+ let result: Result<(), _> = Err(err);
+ let _ = handle_error!(self, result, pk);
+ }
+
+ if claimed_htlc {
+ if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
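+ // Our routing fee is the value claimed on the inbound HTLC minus the value we
+ // forwarded outbound, when both amounts are known.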
+ let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+ Some(claimed_htlc_value - forwarded_htlc_value)
+ } else { None };
+
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PaymentForwarded {
+ fee_earned_msat,
+ claim_from_onchain_tx: from_onchain,
+ });
+ }
}
},
}
pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut htlc_forwards = Vec::new();
- let mut htlc_failures = Vec::new();
- let mut pending_events = Vec::new();
-
- {
+ let chan_restoration_res;
+ let mut pending_failures = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
- Some(chan) => chan,
- None => return,
+ let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
+ hash_map::Entry::Occupied(chan) => chan,
+ hash_map::Entry::Vacant(_) => return,
};
- if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
+ if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
return;
}
- let (raa, commitment_update, order, pending_forwards, mut pending_failures, funding_broadcastable, funding_locked) = channel.monitor_updating_restored(&self.logger);
- if !pending_forwards.is_empty() {
- htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
- }
- htlc_failures.append(&mut pending_failures);
-
- macro_rules! handle_cs { () => {
- if let Some(update) = commitment_update {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: channel.get_counterparty_node_id(),
- updates: update,
- });
- }
- } }
- macro_rules! handle_raa { () => {
- if let Some(revoke_and_ack) = raa {
- pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: channel.get_counterparty_node_id(),
- msg: revoke_and_ack,
- });
- }
- } }
- match order {
- RAACommitmentOrder::CommitmentFirst => {
- handle_cs!();
- handle_raa!();
- },
- RAACommitmentOrder::RevokeAndACKFirst => {
- handle_raa!();
- handle_cs!();
- },
- }
- if let Some(tx) = funding_broadcastable {
- log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
- self.tx_broadcaster.broadcast_transaction(&tx);
- }
- if let Some(msg) = funding_locked {
- pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
- node_id: channel.get_counterparty_node_id(),
- msg,
- });
- if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
- pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: channel.get_counterparty_node_id(),
- msg: announcement_sigs,
- });
- }
- short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+ let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
+ let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() {
+ // We only send a channel_update in the case where we are just now sending a
+ // funding_locked and the channel is in a usable state. Further, we rely on the
+ // normal announcement_signatures process to send a channel_update for public
+ // channels, only generating a unicast channel_update if this is a private channel.
+ Some(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.get().get_counterparty_node_id(),
+ msg: self.get_channel_update_for_unicast(channel.get()).unwrap(),
+ })
+ } else { None };
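+ // handle_chan_restoration_locked queues the RAA/commitment_signed messages in the
+ // required order while the lock is held; work that must happen after the lock is
+ // dropped (e.g. broadcasting the funding transaction and forwarding HTLCs) is
+ // returned and completed by post_handle_chan_restoration below.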
+ chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked);
+ if let Some(upd) = channel_update {
+ channel_state.pending_msg_events.push(upd);
}
- }
-
- self.pending_events.lock().unwrap().append(&mut pending_events);
-
- for failure in htlc_failures.drain(..) {
+ pending_failures
+ };
+ post_handle_chan_restoration!(self, chan_restoration_res);
+ for failure in pending_failures.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
- self.forward_htlcs(&mut htlc_forwards[..]);
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
}
- let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration)
+ let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration)
.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, &their_features), channel_state, chan);
(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
};
if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
- return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+ let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
+ }
+ }
+ return res
}
funding_tx
},
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().funding_locked(&msg, &self.logger), channel_state, chan);
if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
log_trace!(self.logger, "Sending announcement_signatures for {} in response to funding_locked", log_bytes!(chan.get().channel_id()));
// If we see locking block before receiving remote funding_locked, we broadcast our
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
});
+ } else if chan.get().is_usable() {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: counterparty_node_id.clone(),
+ msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
+ });
}
Ok(())
},
}
fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
- let (mut dropped_htlcs, chan_option) = {
+ let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
+ let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry);
+
+ if !chan_entry.get().received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ log_bytes!(msg.channel_id),
+ if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
+
+ let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry);
+ dropped_htlcs = htlcs;
+
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update {
+ if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ let (result, is_permanent) =
+ handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key());
+ if is_permanent {
+ remove_channel!(channel_state, chan_entry);
+ break result;
+ }
+ }
+ }
+
if let Some(msg) = shutdown {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
+ node_id: *counterparty_node_id,
msg,
});
}
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- if chan_entry.get().is_shutdown() {
- if let Some(short_id) = chan_entry.get().get_short_channel_id() {
- channel_state.short_to_id.remove(&short_id);
- }
- (dropped_htlcs, Some(chan_entry.remove_entry().1))
- } else { (dropped_htlcs, None) }
+
+ break Ok(());
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
for htlc_source in dropped_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- if let Some(chan) = chan_option {
- if let Ok(update) = self.get_channel_update(&chan) {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- }
+
+ let _ = handle_error!(self, result, *counterparty_node_id);
Ok(())
}
self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
}
if let Some(chan) = chan_option {
- if let Ok(update) = self.get_channel_update(&chan) {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
+ self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
}
Ok(())
}
}
let create_pending_htlc_status = |chan: &Channel<Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
- // Ensure error_code has the UPDATE flag set, since by default we send a
- // channel update along as part of failing the HTLC.
- assert!((error_code & 0x1000) != 0);
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
match pending_forward_info {
PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
- let reason = if let Ok(upd) = self.get_channel_update(chan) {
- onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
- let mut res = Vec::with_capacity(8 + 128);
- // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
- res.extend_from_slice(&byte_utils::be16_to_array(0));
- res.extend_from_slice(&upd.encode_with_len()[..]);
- res
- }[..])
+ let reason = if (error_code & 0x1000) != 0 {
+ if let Ok(upd) = self.get_channel_update_for_unicast(chan) {
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{
+ let mut res = Vec::with_capacity(8 + 128);
+ // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
+ res.extend_from_slice(&byte_utils::be16_to_array(0));
+ res.extend_from_slice(&upd.encode_with_len()[..]);
+ res
+ }[..])
+ } else {
+ // The only case where we'd be unable to
+ // successfully get a channel update is if the
+ // channel isn't in the fully-funded state yet,
+ // implying our counterparty is trying to route
+ // payments over the channel back to themselves
+ // (because no one else should know the short_id
+ // is a lightning channel yet). We should have
+ // no problem just calling this
+ // unknown_next_peer (0x4000|10).
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
+ }
} else {
- // The only case where we'd be unable to
- // successfully get a channel update is if the
- // channel isn't in the fully-funded state yet,
- // implying our counterparty is trying to route
- // payments over the channel back to themselves
- // (cause no one else should know the short_id
- // is a lightning channel yet). We should have
- // no problem just calling this
- // unknown_next_peer (0x4000|10).
- onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[])
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
};
let msg = msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let mut channel_lock = self.channel_state.lock().unwrap();
- let htlc_source = {
+ let (htlc_source, forwarded_htlc_value) = {
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+ self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
Ok(())
}
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) =
- match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) {
+ let (revoke_and_ack, commitment_signed, monitor_update) =
+ match chan.get_mut().commitment_signed(&msg, &self.logger) {
Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
Err((Some(update), e)) => {
assert!(chan.get().is_awaiting_monitor_update());
};
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
- //TODO: Rebroadcast closing_signed if present on monitor update restoration
}
channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
node_id: counterparty_node_id.clone(),
},
});
}
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
Ok(())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
match channel_state.forward_htlcs.entry(match forward_info.routing {
PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
PendingHTLCRouting::Receive { .. } => 0,
+ PendingHTLCRouting::ReceiveKeysend { .. } => 0,
}) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
- let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) =
- break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan);
+ let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) =
+ break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
htlcs_to_fail = htlcs_to_fail_in;
if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
if was_frozen_for_monitor {
- assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+ assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
} else {
if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) {
updates,
});
}
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap()))
},
hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
}
- let our_node_id = self.get_our_node_id();
- let (announcement, our_bitcoin_sig) =
- try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
-
- let were_node_one = announcement.node_id_1 == our_node_id;
- let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
- {
- let their_node_key = if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 };
- let their_bitcoin_key = if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 };
- match (self.secp_ctx.verify(&msghash, &msg.node_signature, their_node_key),
- self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, their_bitcoin_key)) {
- (Err(e), _) => {
- let chan_err: ChannelError = ChannelError::Close(format!("Bad announcement_signatures. Failed to verify node_signature: {:?}. Maybe using different node_secret for transport and routing msg? UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}", e, &announcement, their_node_key));
- try_chan_entry!(self, Err(chan_err), channel_state, chan);
- },
- (_, Err(e)) => {
- let chan_err: ChannelError = ChannelError::Close(format!("Bad announcement_signatures. Failed to verify bitcoin_signature: {:?}. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})", e, &announcement, their_bitcoin_key));
- try_chan_entry!(self, Err(chan_err), channel_state, chan);
- },
- _ => {}
- }
- }
-
- let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
-
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
- msg: msgs::ChannelAnnouncement {
- node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
- node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
- bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
- bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
- contents: announcement,
- },
- update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
+ msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan),
+ // Note that announcement_signatures fails if the channel cannot be announced,
+ // so get_channel_update_for_broadcast will never fail by the time we get here.
+ update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
});
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
Ok(())
}
- fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+ /// Returns DoPersist if anything changed, otherwise either SkipPersist or an Err.
+ fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
Some(chan_id) => chan_id.clone(),
None => {
// It's not a local channel
- return Ok(())
+ return Ok(NotifyOption::SkipPersist)
}
};
match channel_state.by_id.entry(chan_id) {
hash_map::Entry::Occupied(mut chan) => {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+ if chan.get().should_announce() {
+ // If the channel_update is about one of our channels which is public, some
+ // other peer may simply be forwarding all its gossip to us. Don't respond
+ // with a scary-looking error message; just return Ok instead.
+ return Ok(NotifyOption::SkipPersist);
+ }
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+ }
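+ // Bit 0 of channel_update's flags indicates whether it was generated by node_one
+ // (the lexicographically-lesser node_id). An update purporting to describe our own
+ // side of the channel is one we generated ourselves, so there is nothing to apply.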
+ let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+ let msg_from_node_one = msg.contents.flags & 1 == 0;
+ if were_node_one == msg_from_node_one {
+ return Ok(NotifyOption::SkipPersist);
+ } else {
+ try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
}
- try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
},
hash_map::Entry::Vacant(_) => unreachable!()
}
- Ok(())
+ Ok(NotifyOption::DoPersist)
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
+ let chan_restoration_res;
+ let (htlcs_failed_forward, need_lnd_workaround) = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
- match channel_state.by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- // Currently, we expect all holding cell update_adds to be dropped on peer
- // disconnect, so Channel's reestablish will never hand us any holding cell
- // freed HTLCs to fail backwards. If in the future we no longer drop pending
- // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
- let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
- try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
- if let Some(monitor_update) = monitor_update_opt {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- if revoke_and_ack.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
- }
- if commitment_update.is_none() {
- order = RAACommitmentOrder::RevokeAndACKFirst;
- }
- return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
- //TODO: Resend the funding_locked if needed once we get the monitor running again
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- }
- if let Some(msg) = funding_locked {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
- node_id: counterparty_node_id.clone(),
- msg
- });
- }
- macro_rules! send_raa { () => {
- if let Some(msg) = revoke_and_ack {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ // Currently, we expect all holding cell update_adds to be dropped on peer
+ // disconnect, so Channel's reestablish will never hand us any holding cell
+ // freed HTLCs to fail backwards. If in the future we no longer drop pending
+ // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+ let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
+ try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+ let mut channel_update = None;
+ if let Some(msg) = shutdown {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: counterparty_node_id.clone(),
- msg
+ msg,
+ });
+ } else if chan.get().is_usable() {
+ // If the channel is in a usable state (i.e. the channel is not being shut
+ // down), send a unicast channel_update to our counterparty to make sure
+ // they have the latest channel parameters.
+ channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+ node_id: chan.get().get_counterparty_node_id(),
+ msg: self.get_channel_update_for_unicast(chan.get()).unwrap(),
});
}
- } }
- macro_rules! send_cu { () => {
- if let Some(updates) = commitment_update {
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id.clone(),
- updates
+ let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+ chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked);
+ if let Some(upd) = channel_update {
+ channel_state.pending_msg_events.push(upd);
+ }
+ (htlcs_failed_forward, need_lnd_workaround)
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+ }
+ };
+ post_handle_chan_restoration!(self, chan_restoration_res);
+ self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+
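+ // Some lnd versions send funding_locked before channel_reestablish on reconnect
+ // (lnd bug 4006). If such a message was stashed, replay it through the normal
+ // handler now that the channel_state lock has been released.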
+ if let Some(funding_locked_msg) = need_lnd_workaround {
+ self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
+ }
+ Ok(())
+ }
+
+ /// Process pending events from the `chain::Watch`, returning whether any events were processed.
+ fn process_pending_monitor_events(&self) -> bool {
+ let mut failed_channels = Vec::new();
+ let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+ let has_pending_monitor_events = !pending_monitor_events.is_empty();
+ for monitor_event in pending_monitor_events.drain(..) {
+ match monitor_event {
+ MonitorEvent::HTLCEvent(htlc_update) => {
+ if let Some(preimage) = htlc_update.payment_preimage {
+ log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
+ } else {
+ log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ },
+ MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ failed_channels.push(chan.force_shutdown(false));
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ self.issue_channel_close_events(&chan, ClosureReason::CommitmentTxConfirmed);
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ },
});
}
- } }
- match order {
- RAACommitmentOrder::RevokeAndACKFirst => {
- send_raa!();
- send_cu!();
- },
- RAACommitmentOrder::CommitmentFirst => {
- send_cu!();
- send_raa!();
- },
- }
- if let Some(msg) = shutdown {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- Ok(())
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+ },
+ }
+ }
+
+ for failure in failed_channels.drain(..) {
+ self.finish_force_close_channel(failure);
}
+
+ has_pending_monitor_events
}
- /// Begin Update fee process. Allowed only on an outbound channel.
- /// If successful, will generate a UpdateHTLCs event, so you should probably poll
- /// PeerManager::process_events afterwards.
- /// Note: This API is likely to change!
- /// (C-not exported) Cause its doc(hidden) anyway
- #[doc(hidden)]
- pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let counterparty_node_id;
- let err: Result<(), _> = loop {
+ /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
+ /// Returns whether any updates were made, such as pending HTLCs being freed or a monitor
+ /// update being applied.
+ ///
+ /// This should only apply to HTLCs which were added to the holding cell because we were
+ /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
+ /// directly in `channel_monitor_updated`, as doing so could deadlock by calling back into
+ /// user code to inform it of a channel monitor update.
+ fn check_free_holding_cells(&self) -> bool {
+ let mut has_monitor_update = false;
+ let mut failed_htlcs = Vec::new();
+ let mut handle_errors = Vec::new();
+ {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
- match channel_state.by_id.entry(channel_id) {
- hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}),
- hash_map::Entry::Occupied(mut chan) => {
- if !chan.get().is_outbound() {
- return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()});
- }
- if chan.get().is_awaiting_monitor_update() {
- return Err(APIError::MonitorUpdateFailed);
- }
- if !chan.get().is_live() {
- return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()});
- }
- counterparty_node_id = chan.get().get_counterparty_node_id();
- if let Some((update_fee, commitment_signed, monitor_update)) =
- break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan)
- {
- if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- unimplemented!();
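+ // Attempt to free the holding cell of every channel. Failed HTLCs and channel
+ // errors are collected and handled below, once the channel_state lock is dropped;
+ // channels hitting a permanent error are removed from the map by returning false.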
+ by_id.retain(|channel_id, chan| {
+ match chan.maybe_free_holding_cell_htlcs(&self.logger) {
+ Ok((commitment_opt, holding_cell_failed_htlcs)) => {
+ if !holding_cell_failed_htlcs.is_empty() {
+ failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
+ }
+ if let Some((commitment_update, monitor_update)) = commitment_opt {
+ if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ has_monitor_update = true;
+ let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), res));
+ if close_channel { return false; }
+ } else {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: commitment_update,
+ });
+ }
}
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: Some(update_fee),
- commitment_signed,
- },
- });
+ true
+ },
+ Err(e) => {
+ let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ // ChannelClosed event is generated by handle_error for us
+ !close_channel
}
- },
- }
- return Ok(())
- };
+ }
+ });
+ }
- match handle_error!(self, err, counterparty_node_id) {
- Ok(_) => unreachable!(),
- Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
+ let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
+ for (failures, channel_id) in failed_htlcs.drain(..) {
+ self.fail_holding_cell_htlcs(failures, channel_id);
+ }
+
+ for (counterparty_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
}
+
+ has_update
}
- /// Process pending events from the `chain::Watch`.
- fn process_pending_monitor_events(&self) {
- let mut failed_channels = Vec::new();
+ /// Check whether any channels have finished removing all pending updates after a shutdown
+ /// exchange and can now send a closing_signed.
+ /// Returns whether any closing_signed messages were generated.
+ fn maybe_generate_initial_closing_signed(&self) -> bool {
+ let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
+ let mut has_update = false;
{
- for monitor_event in self.chain_monitor.release_pending_monitor_events() {
- match monitor_event {
- MonitorEvent::HTLCEvent(htlc_update) => {
- if let Some(preimage) = htlc_update.payment_preimage {
- log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
- } else {
- log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+
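+ // Poll every channel for a closing_signed that is now ready to send. A channel
+ // which completes the handshake here is removed from the map and its closing
+ // transaction broadcast.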
+ by_id.retain(|channel_id, chan| {
+ match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+ Ok((msg_opt, tx_opt)) => {
+ if let Some(msg) = msg_opt {
+ has_update = true;
+ pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: chan.get_counterparty_node_id(), msg,
+ });
}
- },
- MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
- let by_id = &mut channel_state.by_id;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+ if let Some(tx) = tx_opt {
+ // We're done with this channel. We got a closing_signed and sent back
+ // a closing_signed with a closing transaction to broadcast.
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- failed_channels.push(chan.force_shutdown(false));
- if let Ok(update) = self.get_channel_update(&chan) {
+
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
- pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: chan.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
- },
- });
- }
+
+ self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
+
+ log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ false
+ } else { true }
},
+ Err(e) => {
+ has_update = true;
+ let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ !close_channel
+ }
}
- }
+ });
}
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
+ for (counterparty_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
}
+
+ has_update
}
/// Handle a list of channel failures during a block_connected or block_disconnected call,
/// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
/// method may return an Err if another payment with the same payment_hash is still pending.
///
- /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to
+ /// `user_payment_id` will be provided back in [`PaymentPurpose::InvoicePayment::user_payment_id`] events to
/// allow tracking of which events correspond with which calls to this and
/// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply
/// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
///
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`PaymentReceived`]: events::Event::PaymentReceived
- /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id
+ /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id
pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
}
+
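+ // Test-only helper demonstrating the EventHandler pattern: a closure collects
+ // events into a RefCell-backed Vec via process_pending_events.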
+ #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+ pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ let events = core::cell::RefCell::new(Vec::new());
+ let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
+ self.process_pending_events(&event_handler);
+ events.into_inner()
+ }
+
+ #[cfg(test)]
+ pub fn has_pending_payments(&self) -> bool {
+ !self.pending_outbound_payments.lock().unwrap().is_empty()
+ }
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
L::Target: Logger,
{
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
- //TODO: This behavior should be documented. It's non-intuitive that we query
- // ChannelMonitors when clearing other events.
- self.process_pending_monitor_events();
+ let events = RefCell::new(Vec::new());
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut result = NotifyOption::SkipPersist;
- let mut ret = Vec::new();
- let mut channel_state = self.channel_state.lock().unwrap();
- mem::swap(&mut ret, &mut channel_state.pending_msg_events);
- ret
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
+
+ if self.check_free_holding_cells() {
+ result = NotifyOption::DoPersist;
+ }
+ if self.maybe_generate_initial_closing_signed() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let mut pending_events = Vec::new();
+ let mut channel_state = self.channel_state.lock().unwrap();
+ mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+
+ if !pending_events.is_empty() {
+ events.replace(pending_events);
+ }
+
+ result
+ });
+ events.into_inner()
}
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
- T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
- F::Target: FeeEstimator,
- L::Target: Logger,
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- fn get_and_clear_pending_events(&self) -> Vec<Event> {
- //TODO: This behavior should be documented. It's non-intuitive that we query
- // ChannelMonitors when clearing other events.
- self.process_pending_monitor_events();
+ /// Processes events that must be periodically handled.
+ ///
+ /// An [`EventHandler`] may safely call back to the provider in order to handle an event.
+ /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
+ ///
+ /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
+ /// when processed, an [`EventHandler`] must be able to handle previously seen events when
+ /// restarting from an old state.
+ fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut result = NotifyOption::SkipPersist;
- let mut ret = Vec::new();
- let mut pending_events = self.pending_events.lock().unwrap();
- mem::swap(&mut ret, &mut *pending_events);
- ret
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
+
+ for event in pending_events.drain(..) {
+ handler.handle_event(&event);
+ }
+
+ result
+ });
}
}
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
- self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time));
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, &self.logger));
}
}
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time));
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, &self.logger));
macro_rules! max_time {
($timestamp: expr) => {
payment_secrets.retain(|_, inbound_payment| {
inbound_payment.expiry_time > header.time as u64
});
+
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ outbounds.retain(|_, payment| {
+ const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
+ if payment.remaining_parts() != 0 { return true }
+ if let PendingOutboundPayment::Retryable { starting_block_height, .. } = payment {
+ return *starting_block_height + PAYMENT_EXPIRY_BLOCKS > height
+ }
+ true
+ });
}
fn get_relevant_txids(&self) -> Vec<Txid> {
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.get_funding_txo() {
if funding_txo.txid == *txid {
- channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new()))
+ channel.funding_transaction_unconfirmed(&self.logger).map(|_| (None, Vec::new()))
} else { Ok((None, Vec::new())) }
} else { Ok((None, Vec::new())) }
});
let res = f(channel);
if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
- let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
+ let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now
data: chan_update,
node_id: channel.get_counterparty_node_id(),
msg: announcement_sigs,
});
+ } else if channel.is_usable() {
+ log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+ pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.get_counterparty_node_id(),
+ msg: self.get_channel_update_for_unicast(channel).unwrap(),
+ });
} else {
log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id()));
}
// It looks like our counterparty went on-chain or funding transaction was
// reorged out of the main chain. Close the channel.
failed_channels.push(channel.force_shutdown(true));
- if let Ok(update) = self.get_channel_update(&channel) {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
+ self.issue_channel_close_events(channel, ClosureReason::CommitmentTxConfirmed);
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
let guard = mtx.lock().unwrap();
*guard
}
+
+ /// Gets the latest best block which was connected either via the [`chain::Listen`] or
+ /// [`chain::Confirm`] interfaces.
+ pub fn current_best_block(&self) -> BestBlock {
+ self.best_block.read().unwrap().clone()
+ }
}
impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
}
fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
+ persist
+ } else {
+ NotifyOption::SkipPersist
+ }
+ });
}
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut failed_channels = Vec::new();
- let mut failed_payments = Vec::new();
let mut no_channels_remain = true;
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
short_to_id.remove(&short_id);
}
failed_channels.push(chan.force_shutdown(true));
- if let Ok(update) = self.get_channel_update(&chan) {
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
+ self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
false
} else {
true
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
channel_state.by_id.retain(|_, chan| {
if chan.get_counterparty_node_id() == *counterparty_node_id {
- // Note that currently on channel reestablish we assert that there are no
- // holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
- // on peer disconnect here, there will need to be corresponding changes in
- // reestablish logic.
- let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- if !failed_adds.is_empty() {
- let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
- failed_payments.push((chan_update, failed_adds));
- }
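+ // Uncommitted HTLCs are dropped here on disconnect; the reestablish logic
+ // relies on the holding cell containing no pending update_adds afterwards.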
+ chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
if chan.is_shutdown() {
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
}
+ self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
return false;
} else {
no_channels_remain = false;
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
for failure in failed_channels.drain(..) {
self.finish_force_close_channel(failure);
}
- for (chan_update, mut htlc_sources) in failed_payments {
- for (htlc_source, payment_hash) in htlc_sources.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
- }
- }
}
fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
if msg.channel_id == [0; 32] {
for chan in self.list_channels() {
- if chan.remote_network_id == *counterparty_node_id {
+ if chan.counterparty.node_id == *counterparty_node_id {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id));
+ let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
}
}
} else {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id));
+ let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
}
}
}
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
-impl Writeable for PendingHTLCInfo {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- match &self.routing {
- &PendingHTLCRouting::Forward { ref onion_packet, ref short_channel_id } => {
- 0u8.write(writer)?;
- onion_packet.write(writer)?;
- short_channel_id.write(writer)?;
- },
- &PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => {
- 1u8.write(writer)?;
- payment_data.payment_secret.write(writer)?;
- payment_data.total_msat.write(writer)?;
- incoming_cltv_expiry.write(writer)?;
- },
- }
- self.incoming_shared_secret.write(writer)?;
- self.payment_hash.write(writer)?;
- self.amt_to_forward.write(writer)?;
- self.outgoing_cltv_value.write(writer)?;
- Ok(())
- }
-}
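+// Serialization of PendingHTLCRouting and PendingHTLCInfo is TLV-based: each variant
+// and field carries an explicit type number, so new fields can be added later without
+// breaking the wire format.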
+impl_writeable_tlv_based_enum!(PendingHTLCRouting,
+ (0, Forward) => {
+ (0, onion_packet, required),
+ (2, short_channel_id, required),
+ },
+ (1, Receive) => {
+ (0, payment_data, required),
+ (2, incoming_cltv_expiry, required),
+ },
+ (2, ReceiveKeysend) => {
+ (0, payment_preimage, required),
+ (2, incoming_cltv_expiry, required),
+ },
+;);
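+// The leading byte written by impl_writeable_tlv_based_enum! selects the variant; the
+// variant's fields then follow as a TLV stream (length-prefixed by write_tlv_fields!),
+// which is what lets new odd-typed fields be added to a variant later without breaking
+// older readers.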
+
+impl_writeable_tlv_based!(PendingHTLCInfo, {
+ (0, routing, required),
+ (2, incoming_shared_secret, required),
+ (4, payment_hash, required),
+ (6, amt_to_forward, required),
+ (8, outgoing_cltv_value, required)
+});
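+// Roughly speaking, impl_writeable_tlv_based! expands to Writeable/Readable impls built
+// on write_tlv_fields!/read_tlv_fields!. An illustrative sketch (not the literal macro
+// expansion) of the writer it generates for PendingHTLCInfo:
+//
+// impl Writeable for PendingHTLCInfo {
+//     fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+//         write_tlv_fields!(writer, {
+//             (0, self.routing, required),
+//             (2, self.incoming_shared_secret, required),
+//             (4, self.payment_hash, required),
+//             (6, self.amt_to_forward, required),
+//             (8, self.outgoing_cltv_value, required),
+//         });
+//         Ok(())
+//     }
+// }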
-impl Readable for PendingHTLCInfo {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCInfo, DecodeError> {
- Ok(PendingHTLCInfo {
- routing: match Readable::read(reader)? {
- 0u8 => PendingHTLCRouting::Forward {
- onion_packet: Readable::read(reader)?,
- short_channel_id: Readable::read(reader)?,
- },
- 1u8 => PendingHTLCRouting::Receive {
- payment_data: msgs::FinalOnionHopData {
- payment_secret: Readable::read(reader)?,
- total_msat: Readable::read(reader)?,
- },
- incoming_cltv_expiry: Readable::read(reader)?,
- },
- _ => return Err(DecodeError::InvalidValue),
- },
- incoming_shared_secret: Readable::read(reader)?,
- payment_hash: Readable::read(reader)?,
- amt_to_forward: Readable::read(reader)?,
- outgoing_cltv_value: Readable::read(reader)?,
- })
- }
-}
impl Writeable for HTLCFailureMsg {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self {
- &HTLCFailureMsg::Relay(ref fail_msg) => {
+ HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
0u8.write(writer)?;
- fail_msg.write(writer)?;
+ channel_id.write(writer)?;
+ htlc_id.write(writer)?;
+ reason.write(writer)?;
},
- &HTLCFailureMsg::Malformed(ref fail_msg) => {
+ HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id, htlc_id, sha256_of_onion, failure_code
+ }) => {
1u8.write(writer)?;
- fail_msg.write(writer)?;
- }
+ channel_id.write(writer)?;
+ htlc_id.write(writer)?;
+ sha256_of_onion.write(writer)?;
+ failure_code.write(writer)?;
+ },
}
Ok(())
}
}
impl Readable for HTLCFailureMsg {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> {
- match <u8 as Readable>::read(reader)? {
- 0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)),
- 1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)),
- _ => Err(DecodeError::InvalidValue),
- }
- }
-}
-
-impl Writeable for PendingHTLCStatus {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- match self {
- &PendingHTLCStatus::Forward(ref forward_info) => {
- 0u8.write(writer)?;
- forward_info.write(writer)?;
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let id: u8 = Readable::read(reader)?;
+ match id {
+ 0 => {
+ Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: Readable::read(reader)?,
+ htlc_id: Readable::read(reader)?,
+ reason: Readable::read(reader)?,
+ }))
},
- &PendingHTLCStatus::Fail(ref fail_msg) => {
- 1u8.write(writer)?;
- fail_msg.write(writer)?;
- }
+ 1 => {
+ Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: Readable::read(reader)?,
+ htlc_id: Readable::read(reader)?,
+ sha256_of_onion: Readable::read(reader)?,
+ failure_code: Readable::read(reader)?,
+ }))
+ },
+ // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
+ // weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
+ // messages contained in the variants.
+ // In version 0.0.101, support for reading the variants with these types was added, and
+ // we should migrate to writing these variants when UpdateFailHTLC or
+ // UpdateFailMalformedHTLC get TLV fields.
+ 2 => {
+ let length: BigSize = Readable::read(reader)?;
+ let mut s = FixedLengthReader::new(reader, length.0);
+ let res = Readable::read(&mut s)?;
+ s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
+ Ok(HTLCFailureMsg::Relay(res))
+ },
+ 3 => {
+ let length: BigSize = Readable::read(reader)?;
+ let mut s = FixedLengthReader::new(reader, length.0);
+ let res = Readable::read(&mut s)?;
+ s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
+ Ok(HTLCFailureMsg::Malformed(res))
+ },
+ _ => Err(DecodeError::UnknownRequiredFeature),
}
- Ok(())
}
}
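+// Once UpdateFailHTLC/UpdateFailMalformedHTLC grow TLV fields, the write side above can
+// migrate to the length-prefixed types 2/3 read here. A rough sketch of what such a
+// writer might look like, assuming an in-memory Vec<u8> buffer is usable as a Writer:
+//
+// fn write_length_prefixed<W: Writer, M: Writeable>(w: &mut W, ty: u8, msg: &M) -> Result<(), io::Error> {
+//     let mut buf = Vec::new();
+//     msg.write(&mut buf)?;
+//     ty.write(w)?;
+//     BigSize(buf.len() as u64).write(w)?;
+//     w.write_all(&buf)
+// }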
-impl Readable for PendingHTLCStatus {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> {
- match <u8 as Readable>::read(reader)? {
- 0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)),
- 1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)),
- _ => Err(DecodeError::InvalidValue),
- }
- }
-}
+impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
+ (0, Forward),
+ (1, Fail),
+);
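+// The variants listed after the `;` are tuple variants: for these the macro writes the ID
+// byte followed directly by the inner value's own serialization (PendingHTLCInfo and
+// HTLCFailureMsg here) rather than a struct-style TLV stream.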
-impl_writeable!(HTLCPreviousHopData, 0, {
- short_channel_id,
- outpoint,
- htlc_id,
- incoming_packet_shared_secret
+impl_writeable_tlv_based!(HTLCPreviousHopData, {
+ (0, short_channel_id, required),
+ (2, outpoint, required),
+ (4, htlc_id, required),
+ (6, incoming_packet_shared_secret, required)
});
impl Writeable for ClaimableHTLC {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- self.prev_hop.write(writer)?;
- self.value.write(writer)?;
- self.payment_data.payment_secret.write(writer)?;
- self.payment_data.total_msat.write(writer)?;
- self.cltv_expiry.write(writer)
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ let payment_data = match &self.onion_payload {
+ OnionPayload::Invoice(data) => Some(data.clone()),
+ _ => None,
+ };
+ let keysend_preimage = match self.onion_payload {
+ OnionPayload::Invoice(_) => None,
+ OnionPayload::Spontaneous(preimage) => Some(preimage),
+ };
+ write_tlv_fields!(writer, {
+ (0, self.prev_hop, required),
+ (2, self.value, required),
+ (4, payment_data, option),
+ (6, self.cltv_expiry, required),
+ (8, keysend_preimage, option),
+ });
+ Ok(())
}
}
impl Readable for ClaimableHTLC {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
- Ok(ClaimableHTLC {
- prev_hop: Readable::read(reader)?,
- value: Readable::read(reader)?,
- payment_data: msgs::FinalOnionHopData {
- payment_secret: Readable::read(reader)?,
- total_msat: Readable::read(reader)?,
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let mut prev_hop = ::util::ser::OptionDeserWrapper(None);
+ let mut value = 0;
+ let mut payment_data: Option<msgs::FinalOnionHopData> = None;
+ let mut cltv_expiry = 0;
+ let mut keysend_preimage: Option<PaymentPreimage> = None;
+ read_tlv_fields!(reader, {
+ (0, prev_hop, required),
+ (2, value, required),
+ (4, payment_data, option),
+ (6, cltv_expiry, required),
+ (8, keysend_preimage, option),
+ });
+ let onion_payload = match keysend_preimage {
+ Some(p) => {
+ if payment_data.is_some() {
+ return Err(DecodeError::InvalidValue)
+ }
+ OnionPayload::Spontaneous(p)
},
- cltv_expiry: Readable::read(reader)?,
- })
- }
-}
-
-impl Writeable for HTLCSource {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- match self {
- &HTLCSource::PreviousHopData(ref hop_data) => {
- 0u8.write(writer)?;
- hop_data.write(writer)?;
+ None => {
+ if payment_data.is_none() {
+ return Err(DecodeError::InvalidValue)
+ }
+ OnionPayload::Invoice(payment_data.unwrap())
},
- &HTLCSource::OutboundRoute { ref path, ref session_priv, ref first_hop_htlc_msat } => {
- 1u8.write(writer)?;
- path.write(writer)?;
- session_priv.write(writer)?;
- first_hop_htlc_msat.write(writer)?;
- }
- }
- Ok(())
+ };
+ Ok(Self {
+ prev_hop: prev_hop.0.unwrap(),
+ value,
+ onion_payload,
+ cltv_expiry,
+ })
}
}
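+// Invariant: exactly one of payment_data (type 4) and keysend_preimage (type 8) is
+// present. The writer above derives both options from the single onion_payload enum, and
+// the reader rejects streams where both or neither are set with DecodeError::InvalidValue.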
impl Readable for HTLCSource {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCSource, DecodeError> {
- match <u8 as Readable>::read(reader)? {
- 0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
- 1 => Ok(HTLCSource::OutboundRoute {
- path: Readable::read(reader)?,
- session_priv: Readable::read(reader)?,
- first_hop_htlc_msat: Readable::read(reader)?,
- }),
- _ => Err(DecodeError::InvalidValue),
- }
- }
-}
-
-impl Writeable for HTLCFailReason {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
- match self {
- &HTLCFailReason::LightningError { ref err } => {
- 0u8.write(writer)?;
- err.write(writer)?;
- },
- &HTLCFailReason::Reason { ref failure_code, ref data } => {
- 1u8.write(writer)?;
- failure_code.write(writer)?;
- data.write(writer)?;
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let id: u8 = Readable::read(reader)?;
+ match id {
+ 0 => {
+ let mut session_priv: ::util::ser::OptionDeserWrapper<SecretKey> = ::util::ser::OptionDeserWrapper(None);
+ let mut first_hop_htlc_msat: u64 = 0;
+ let mut path = Some(Vec::new());
+ let mut payment_id = None;
+ read_tlv_fields!(reader, {
+ (0, session_priv, required),
+ (1, payment_id, option),
+ (2, first_hop_htlc_msat, required),
+ (4, path, vec_type),
+ });
+ if payment_id.is_none() {
+ // For backwards compat, if there was no payment_id written, use the session_priv bytes
+ // instead.
+ payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
+ }
+ Ok(HTLCSource::OutboundRoute {
+ session_priv: session_priv.0.unwrap(),
+ first_hop_htlc_msat: first_hop_htlc_msat,
+ path: path.unwrap(),
+ payment_id: payment_id.unwrap(),
+ })
}
- }
- Ok(())
- }
-}
-
-impl Readable for HTLCFailReason {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCFailReason, DecodeError> {
- match <u8 as Readable>::read(reader)? {
- 0 => Ok(HTLCFailReason::LightningError { err: Readable::read(reader)? }),
- 1 => Ok(HTLCFailReason::Reason {
- failure_code: Readable::read(reader)?,
- data: Readable::read(reader)?,
- }),
- _ => Err(DecodeError::InvalidValue),
+ 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
+ _ => Err(DecodeError::UnknownRequiredFeature),
}
}
}
-impl Writeable for HTLCForwardInfo {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+impl Writeable for HTLCSource {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self {
- &HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_funding_outpoint, ref prev_htlc_id, ref forward_info } => {
+ HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
0u8.write(writer)?;
- prev_short_channel_id.write(writer)?;
- prev_funding_outpoint.write(writer)?;
- prev_htlc_id.write(writer)?;
- forward_info.write(writer)?;
- },
- &HTLCForwardInfo::FailHTLC { ref htlc_id, ref err_packet } => {
+ let payment_id_opt = Some(payment_id);
+ write_tlv_fields!(writer, {
+ (0, session_priv, required),
+ (1, payment_id_opt, option),
+ (2, first_hop_htlc_msat, required),
+ (4, path, vec_type),
+ });
+ }
+ HTLCSource::PreviousHopData(ref field) => {
1u8.write(writer)?;
- htlc_id.write(writer)?;
- err_packet.write(writer)?;
- },
+ field.write(writer)?;
+ }
}
Ok(())
}
}
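+// payment_id is written with odd TLV type 1, so serializations produced here remain
+// readable by versions that predate payment IDs (unknown odd types are skipped on read).
+// When it is absent, the reader above reconstructs a payment_id from the session_priv bytes.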
-impl Readable for HTLCForwardInfo {
- fn read<R: ::std::io::Read>(reader: &mut R) -> Result<HTLCForwardInfo, DecodeError> {
- match <u8 as Readable>::read(reader)? {
- 0 => Ok(HTLCForwardInfo::AddHTLC {
- prev_short_channel_id: Readable::read(reader)?,
- prev_funding_outpoint: Readable::read(reader)?,
- prev_htlc_id: Readable::read(reader)?,
- forward_info: Readable::read(reader)?,
- }),
- 1 => Ok(HTLCForwardInfo::FailHTLC {
- htlc_id: Readable::read(reader)?,
- err_packet: Readable::read(reader)?,
- }),
- _ => Err(DecodeError::InvalidValue),
- }
- }
-}
-
-impl_writeable!(PendingInboundPayment, 0, {
- payment_secret,
- expiry_time,
- user_payment_id,
- payment_preimage,
- min_value_msat
+impl_writeable_tlv_based_enum!(HTLCFailReason,
+ (0, LightningError) => {
+ (0, err, required),
+ },
+ (1, Reason) => {
+ (0, failure_code, required),
+ (2, data, vec_type),
+ },
+;);
+
+impl_writeable_tlv_based_enum!(HTLCForwardInfo,
+ (0, AddHTLC) => {
+ (0, forward_info, required),
+ (2, prev_short_channel_id, required),
+ (4, prev_htlc_id, required),
+ (6, prev_funding_outpoint, required),
+ },
+ (1, FailHTLC) => {
+ (0, htlc_id, required),
+ (2, err_packet, required),
+ },
+;);
+
+impl_writeable_tlv_based!(PendingInboundPayment, {
+ (0, payment_secret, required),
+ (2, expiry_time, required),
+ (4, user_payment_id, required),
+ (6, payment_preimage, required),
+ (8, min_value_msat, required),
});
+impl_writeable_tlv_based_enum!(PendingOutboundPayment,
+ (0, Legacy) => {
+ (0, session_privs, required),
+ },
+ (2, Retryable) => {
+ (0, session_privs, required),
+ (2, payment_hash, required),
+ (4, payment_secret, option),
+ (6, total_msat, required),
+ (8, pending_amt_msat, required),
+ (10, starting_block_height, required),
+ },
+;);
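+// Legacy (type 0) mirrors what pre-retry versions tracked: only the per-path session
+// privs. Retryable (type 2) additionally records the data needed to retry failed paths:
+// the payment hash and secret, total and pending amounts, and the starting block height.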
+
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
let _consistency_lock = self.total_consistency_lock.write().unwrap();
- writer.write_all(&[SERIALIZATION_VERSION; 1])?;
- writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+ write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
self.genesis_hash.write(writer)?;
{
}
let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
- (pending_outbound_payments.len() as u64).write(writer)?;
- for session_priv in pending_outbound_payments.iter() {
- session_priv.write(writer)?;
+ // For backwards compat, write the session privs preceded by their total count.
+ let mut num_pending_outbounds_compat: u64 = 0;
+ for (_, outbound) in pending_outbound_payments.iter() {
+ num_pending_outbounds_compat += outbound.remaining_parts() as u64;
+ }
+ num_pending_outbounds_compat.write(writer)?;
+ for (_, outbound) in pending_outbound_payments.iter() {
+ match outbound {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } => {
+ for session_priv in session_privs.iter() {
+ session_priv.write(writer)?;
+ }
+ }
+ }
+ }
+
+ // Encode without retry info for 0.0.101 compatibility.
+ let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
+ for (id, outbound) in pending_outbound_payments.iter() {
+ match outbound {
+ PendingOutboundPayment::Legacy { session_privs } |
+ PendingOutboundPayment::Retryable { session_privs, .. } => {
+ pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
+ }
+ }
}
+ write_tlv_fields!(writer, {
+ (1, pending_outbound_payments_no_retry, required),
+ (3, pending_outbound_payments, required),
+ });
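+ // Note the three redundant encodings written above: the legacy count-prefixed list of
+ // session privs (for pre-TLV readers), the type-1 no-retry map (for 0.0.101 readers),
+ // and the type-3 map carrying full retry data for current readers.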
Ok(())
}
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
- let _ver: u8 = Readable::read(reader)?;
- let min_ver: u8 = Readable::read(reader)?;
- if min_ver > SERIALIZATION_VERSION {
- return Err(DecodeError::UnknownVersion);
- }
+ fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
let best_block_height: u32 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut channel_closures = Vec::new();
for _ in 0..channel_count {
let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
// If the channel is ahead of the monitor, return InvalidValue:
+ log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+ log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
+ log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+ log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+ log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
return Err(DecodeError::InvalidValue);
} else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
// But if the channel is behind the monitor, close the channel:
+ log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+ log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+ log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
failed_htlcs.append(&mut new_failed_htlcs);
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
+ channel_closures.push(events::Event::ChannelClosed {
+ channel_id: channel.channel_id(),
+ reason: ClosureReason::OutdatedChannelManager
+ });
} else {
if let Some(short_channel_id) = channel.get_short_channel_id() {
short_to_id.insert(short_channel_id, channel.channel_id());
by_id.insert(channel.channel_id(), channel);
}
} else {
+ log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+ log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+ log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
+ log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/rust-bitcoin/rust-lightning");
return Err(DecodeError::InvalidValue);
}
}
None => continue,
}
}
+ if forward_htlcs_count > 0 {
+ // If we have pending HTLCs to forward, assume we either dropped a
+ // `PendingHTLCsForwardable` or the user received it but never processed it as they
+ // shut down before the timer hit. Either way, set the time_forwardable to a small
+ // constant as enough time has likely passed that we should simply handle the forwards
+ // now, or at least after the user gets a chance to reconnect to our peers.
+ pending_events_read.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable: Duration::from_secs(2),
+ });
+ }
let background_event_count: u64 = Readable::read(reader)?;
let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
}
}
- let pending_outbound_payments_count: u64 = Readable::read(reader)?;
- let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32));
- for _ in 0..pending_outbound_payments_count {
- if !pending_outbound_payments.insert(Readable::read(reader)?) {
- return Err(DecodeError::InvalidValue);
+ let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
+ let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
+ HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
+ for _ in 0..pending_outbound_payments_count_compat {
+ let session_priv = Readable::read(reader)?;
+ let payment = PendingOutboundPayment::Legacy {
+ session_privs: [session_priv].iter().cloned().collect()
+ };
+ if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
+ return Err(DecodeError::InvalidValue)
+ };
+ }
+
+ // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
+ let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
+ let mut pending_outbound_payments = None;
+ read_tlv_fields!(reader, {
+ (1, pending_outbound_payments_no_retry, option),
+ (3, pending_outbound_payments, option),
+ });
+ if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
+ pending_outbound_payments = Some(pending_outbound_payments_compat);
+ } else if pending_outbound_payments.is_none() {
+ let mut outbounds = HashMap::new();
+ for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
+ outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
}
+ pending_outbound_payments = Some(outbounds);
}
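+ // Precedence when reading back: prefer the full retry-capable map (type 3), fall back
+ // to the 0.0.101 no-retry map (type 1) re-wrapped as Legacy payments, and finally to
+ // the count-prefixed session-priv list written by pre-TLV versions.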
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
+ if !channel_closures.is_empty() {
+ pending_events_read.append(&mut channel_closures);
+ }
+
let channel_manager = ChannelManager {
genesis_hash,
fee_estimator: args.fee_estimator,
pending_msg_events: Vec::new(),
}),
pending_inbound_payments: Mutex::new(pending_inbound_payments),
- pending_outbound_payments: Mutex::new(pending_outbound_payments),
+ pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
our_network_key: args.keys_manager.get_node_secret(),
our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
#[cfg(test)]
mod tests {
- use ln::channelmanager::PersistenceNotifier;
- use std::sync::Arc;
- use std::sync::atomic::{AtomicBool, Ordering};
- use std::thread;
- use std::time::Duration;
+ use bitcoin::hashes::Hash;
+ use bitcoin::hashes::sha256::Hash as Sha256;
+ use core::time::Duration;
+ use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+ use ln::channelmanager::{PaymentId, PaymentSendFailure};
+ use ln::features::{InitFeatures, InvoiceFeatures};
+ use ln::functional_test_utils::*;
+ use ln::msgs;
+ use ln::msgs::ChannelMessageHandler;
+ use routing::router::{get_keysend_route, get_route};
+ use util::errors::APIError;
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use util::test_utils;
+ #[cfg(feature = "std")]
#[test]
fn test_wait_timeout() {
+ use ln::channelmanager::PersistenceNotifier;
+ use sync::Arc;
+ use core::sync::atomic::{AtomicBool, Ordering};
+ use std::thread;
+
let persistence_notifier = Arc::new(PersistenceNotifier::new());
let thread_notifier = Arc::clone(&persistence_notifier);
}
}
}
+
+ #[test]
+ fn test_notify_limits() {
+ // Check that a few cases which don't require the persistence of a new ChannelManager,
+ // indeed, do not cause the persistence of a new ChannelManager.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // All nodes start with a persistable update pending as `create_network` connects each node
+ // with all other nodes to make most tests simpler.
+ assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+ let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+
+ // We check that the channel info each node holds doesn't change too early, even though
+ // we try to connect messages with new values
+ chan.0.contents.fee_base_msat *= 2;
+ chan.1.contents.fee_base_msat *= 2;
+ let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
+ let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
+
+ // The first two nodes (which opened a channel) should now require fresh persistence
+ assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ // ... but the last node should not.
+ assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ // After persisting the first two nodes they should no longer need fresh persistence.
+ assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+ // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
+ // about the channel.
+ nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
+ nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
+ assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+ // The nodes which are a party to the channel should also ignore messages from unrelated
+ // parties.
+ nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+ nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+ nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
+ nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
+ assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+
+ // At this point the channel info given by peers should still be the same.
+ assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+ assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+ // An earlier version of handle_channel_update didn't check the directionality of the
+ // update message and would always update the local fee info, even if our peer was
+ // (spuriously) forwarding us our own channel_update.
+ let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
+ let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
+ let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
+
+ // First deliver each peers' own message, checking that the node doesn't need to be
+ // persisted and that its channel info remains the same.
+ nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
+ nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
+ assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
+ assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
+
+ // Finally, deliver the other peers' message, ensuring each node needs to be persisted and
+ // the channel info has updated.
+ nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
+ nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
+ assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+ assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
+ assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
+ }
+
+ #[test]
+ fn test_keysend_dup_hash_partial_mpp() {
+ // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
+ // expected.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let logger = test_utils::TestLogger::new();
+
+ // First, send a partial MPP payment.
+ let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+ let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+ let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+ let payment_id = PaymentId([42; 32]);
+ // Use the utility function send_payment_along_path to send the payment with MPP data which
+ // indicates there are more HTLCs coming.
+ let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
+ nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+ // Next, send a keysend payment with the same payment_hash and make sure it fails.
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = events.drain(..).next().unwrap();
+ let payment_event = SendEvent::from_event(ev);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], our_payment_hash, true);
+
+ // Send the second half of the original MPP payment.
+ nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
+
+ // Claim the full MPP payment. Note that we can't use a test utility like
+ // claim_funds_along_route because the ordering of the messages causes the second half of the
+ // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
+ // lightning messages manually.
+ assert!(nodes[1].node.claim_funds(payment_preimage));
+ check_added_monitors!(nodes[1], 2);
+ let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+ let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+ check_added_monitors!(nodes[1], 1);
+ let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
+ check_added_monitors!(nodes[1], 1);
+ let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+ check_added_monitors!(nodes[0], 1);
+ let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+ let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ check_added_monitors!(nodes[0], 1);
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
+ check_added_monitors!(nodes[1], 1);
+ let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+ check_added_monitors!(nodes[0], 1);
+
+ // Note that successful MPP payments will generate a single PaymentSent event upon the
+ // first path's success. No further events will be generated for subsequent path successes.
+ let events = nodes[0].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::PaymentSent { payment_preimage: ref preimage, payment_hash: ref hash } => {
+ assert_eq!(payment_preimage, *preimage);
+ assert_eq!(our_payment_hash, *hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ #[test]
+ fn test_keysend_dup_payment_hash() {
+ // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
+ // outbound regular payment fails as expected.
+ // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
+ // fails as expected.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let logger = test_utils::TestLogger::new();
+
+ // To start (1), send a regular payment but don't claim it.
+ let expected_route = [&nodes[1]];
+ let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+
+ // Next, attempt a keysend payment and make sure it fails.
+ let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = events.drain(..).next().unwrap();
+ let payment_event = SendEvent::from_event(ev);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], payment_hash, true);
+
+ // Finally, claim the original payment.
+ claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+ // To start (2), send a keysend payment but don't claim it.
+ let payment_preimage = PaymentPreimage([42; 32]);
+ let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+ let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let event = events.pop().unwrap();
+ let path = vec![&nodes[1]];
+ pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+ // Next, attempt a regular payment and make sure it fails.
+ let payment_secret = PaymentSecret([43; 32]);
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = events.drain(..).next().unwrap();
+ let payment_event = SendEvent::from_event(ev);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], payment_hash, true);
+
+ // Finally, succeed the keysend payment.
+ claim_payment(&nodes[0], &expected_route, payment_preimage);
+ }
+
+ #[test]
+ fn test_keysend_hash_mismatch() {
+ // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
+ // preimage doesn't match the msg's payment hash.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let payer_pubkey = nodes[0].node.get_our_node_id();
+ let payee_pubkey = nodes[1].node.get_our_node_id();
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+ let first_hops = nodes[0].node.list_usable_channels();
+ let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
+ Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+ nodes[0].logger).unwrap();
+
+ let test_preimage = PaymentPreimage([42; 32]);
+ let mismatch_payment_hash = PaymentHash([43; 32]);
+ let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+ }
+
+ #[test]
+ fn test_keysend_msg_with_secret_err() {
+ // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let payer_pubkey = nodes[0].node.get_our_node_id();
+ let payee_pubkey = nodes[1].node.get_our_node_id();
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+ let first_hops = nodes[0].node.list_usable_channels();
+ let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
+ Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+ nodes[0].logger).unwrap();
+
+ let test_preimage = PaymentPreimage([42; 32]);
+ let test_secret = PaymentSecret([43; 32]);
+ let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
+ let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+ }
+
+ #[test]
+ fn test_multi_hop_missing_secret() {
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let logger = test_utils::TestLogger::new();
+
+ // Marshall an MPP route.
+ let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
+ let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+ let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+ let path = route.paths[0].clone();
+ route.paths.push(path);
+ route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+ route.paths[0][0].short_channel_id = chan_1_id;
+ route.paths[0][1].short_channel_id = chan_3_id;
+ route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+ route.paths[1][0].short_channel_id = chan_2_id;
+ route.paths[1][1].short_channel_id = chan_4_id;
+
+ match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() {
+ PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
+ assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) },
+ _ => panic!("unexpected error")
+ }
+ }
}
#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
use ln::features::{InitFeatures, InvoiceFeatures};
use ln::functional_test_utils::*;
- use ln::msgs::ChannelMessageHandler;
+ use ln::msgs::{ChannelMessageHandler, Init};
use routing::network_graph::NetworkGraph;
use routing::router::get_route;
use util::test_utils;
use util::config::UserConfig;
- use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::{Block, BlockHeader, Transaction, TxOut};
- use std::sync::Mutex;
+ use sync::{Arc, Mutex};
use test::Bencher;
let network = bitcoin::Network::Testnet;
let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
- let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
- let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+ let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
+ let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let mut config: UserConfig = Default::default();
config.own_channel_config.minimum_depth = 1;
});
let node_b_holder = NodeHolder { node: &node_b };
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() });
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() });
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
Listen::block_connected(&node_b, &block, 1);
node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
- node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+ let msg_events = node_a.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 2);
+ match msg_events[0] {
+ MessageSendEvent::SendFundingLocked { ref msg, .. } => {
+ node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+ get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
+ },
+ _ => panic!(),
+ }
+ match msg_events[1] {
+ MessageSendEvent::SendChannelUpdate { .. } => {},
+ _ => panic!(),
+ }
let dummy_graph = NetworkGraph::new(genesis_hash);