//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
//! upon reconnect to the relevant peer(s).
//!
-//! It does not manage routing logic (see routing::router::get_route for that) nor does it manage constructing
+//! It does not manage routing logic (see [`find_route`] for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).
//!
+//! [`find_route`]: crate::routing::router::find_route
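+//!
+//! As a rough sketch of that division of labor (illustrative only; router setup is elided and
+//! the argument lists here should not be read as a compilable recipe):
+//!
+//! ```ignore
+//! // The router picks the path...
+//! let route = find_route(&our_node_pubkey, &route_params, &network_graph, None, &logger,
+//!     &scorer, &random_seed_bytes)?;
+//! // ...while the ChannelManager sends along it and tracks the resulting HTLCs.
+//! let payment_id = channel_manager.send_payment(&route, payment_hash, &Some(payment_secret))?;
+//! ```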
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
-use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{BlockHash, Txid};
-use bitcoin::secp256k1::key::{SecretKey,PublicKey};
+use bitcoin::secp256k1::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::ecdh::SharedSecret;
-use bitcoin::secp256k1;
+use bitcoin::{LockTime, secp256k1, Sequence};
-use chain;
-use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
-use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
-use chain::transaction::{OutPoint, TransactionData};
+use crate::chain;
+use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
+use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
+use crate::chain::transaction::{OutPoint, TransactionData};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
-use ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
-use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
-use ln::features::{ChannelTypeFeatures, InitFeatures, NodeFeatures};
-use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
-use ln::msgs;
-use ln::msgs::NetAddress;
-use ln::onion_utils;
-use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
-use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
-use util::config::UserConfig;
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
-use util::{byte_utils, events};
-use util::scid_utils::fake_scid;
-use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
-use util::logger::{Level, Logger};
-use util::errors::APIError;
-
-use io;
-use prelude::*;
+use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
+use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
+use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
+#[cfg(any(feature = "_test_utils", test))]
+use crate::ln::features::InvoiceFeatures;
+use crate::routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
+use crate::ln::msgs;
+use crate::ln::onion_utils;
+use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
+use crate::ln::wire::Encode;
+use crate::chain::keysinterface::{Sign, KeysInterface, KeysManager, Recipient};
+use crate::util::config::{UserConfig, ChannelConfig};
+use crate::util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
+use crate::util::{byte_utils, events};
+use crate::util::wakers::{Future, Notifier};
+use crate::util::scid_utils::fake_scid;
+use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
+use crate::util::logger::{Level, Logger};
+use crate::util::errors::APIError;
+
+use crate::io;
+use crate::prelude::*;
use core::{cmp, mem};
use core::cell::RefCell;
-use io::Read;
-use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use crate::io::Read;
+use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::time::Duration;
use core::ops::Deref;
-#[cfg(any(test, feature = "std"))]
-use std::time::Instant;
-use util::crypto::sign;
-
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
pub(super) enum PendingHTLCRouting {
Forward {
onion_packet: msgs::OnionPacket,
+ /// The SCID from the onion that we should forward to. This could be a "real" SCID, an
+ /// outbound SCID alias, or a phantom node SCID.
short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
},
Receive {
// `process_pending_htlc_forwards()` for constructing the
// `HTLCSource::PreviousHopData` for failed and forwarded
// HTLCs.
+ //
+ // Note that this may be an outbound SCID alias for the associated channel.
prev_short_channel_id: u64,
prev_htlc_id: u64,
prev_funding_outpoint: OutPoint,
/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
+ // Note that this may be an outbound SCID alias for the associated channel.
short_channel_id: u64,
htlc_id: u64,
incoming_packet_shared_secret: [u8; 32],
}
enum OnionPayload {
- /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
- /// payment_secret which prevents path-probing attacks and can associate different HTLCs which
- /// are part of the same payment.
- Invoice(msgs::FinalOnionHopData),
+ /// Indicates this incoming onion payload is for the purpose of paying an invoice.
+ Invoice {
+ /// This is only here for backwards-compatibility in serialization, in the future it can be
+ /// removed, breaking clients running 0.0.106 and earlier.
+ _legacy_hop_data: Option<msgs::FinalOnionHopData>,
+ },
/// Contains the payer-provided preimage.
Spontaneous(PaymentPreimage),
}
+/// HTLCs that are to us and can be failed/claimed by the user
struct ClaimableHTLC {
prev_hop: HTLCPreviousHopData,
cltv_expiry: u32,
+ /// The amount (in msats) of this MPP part
value: u64,
onion_payload: OnionPayload,
timer_ticks: u8,
+ /// The sum total of all MPP parts
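+ /// (e.g. for a payment of 300,000 msat split into three equal MPP parts, each part would have
+ /// `value` 100,000 msat and `total_msat` 300,000 msat)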
+ total_msat: u64,
}
/// A payment identifier used to uniquely identify a payment to LDK.
DuplicateClaim,
}
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
},
},
},
- ChannelError::CloseDelayBroadcast(msg) => LightningError {
- err: msg.clone(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage {
- channel_id,
- data: msg
- },
- },
- },
},
chan_id: None,
shutdown_finish: None,
// Note this is only exposed in cfg(test):
pub(super) struct ChannelHolder<Signer: Sign> {
pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
- /// SCIDs (and outbound SCID aliases) to the real channel id. Outbound SCID aliases are added
- /// here once the channel is available for normal use, with SCIDs being added once the funding
- /// transaction is confirmed at the channel's required confirmation depth.
- pub(super) short_to_id: HashMap<u64, [u8; 32]>,
- /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
- ///
- /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
- /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
- /// and via the classic SCID.
- ///
- /// Note that while this is held in the same mutex as the channels themselves, no consistency
- /// guarantees are made about the existence of a channel with the short id here, nor the short
- /// ids in the PendingHTLCInfo!
- pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
- /// Map from payment hash to any HTLCs which are to us and can be failed/claimed by the user.
- /// Note that while this is held in the same mutex as the channels themselves, no consistency
- /// guarantees are made about the channels given here actually existing anymore by the time you
- /// go to read them!
- claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
Fulfilled {
session_privs: HashSet<[u8; 32]>,
payment_hash: Option<PaymentHash>,
+ timer_ticks_without_htlcs: u8,
},
/// When a payer gives up trying to retry a payment, they inform us, letting us generate a
/// `PaymentFailed` event when all HTLCs have irrevocably failed. This avoids a number of race
}
impl PendingOutboundPayment {
- fn is_retryable(&self) -> bool {
- match self {
- PendingOutboundPayment::Retryable { .. } => true,
- _ => false,
- }
- }
fn is_fulfilled(&self) -> bool {
match self {
PendingOutboundPayment::Fulfilled { .. } => true,
=> session_privs,
});
let payment_hash = self.payment_hash();
- *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash };
+ *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash, timer_ticks_without_htlcs: 0 };
}
fn mark_abandoned(&mut self) -> Result<(), ()> {
/// concrete type of the KeysManager.
///
/// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
+pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
/// concrete type of the KeysManager.
///
/// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemorySigner, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
/// essentially you should default to using a SimpleRefChannelManager, and use a
/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
/// you're using lightning-net-tokio.
-pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- where M::Target: chain::Watch<Signer>,
+//
+// Lock order:
+// The tree structure below illustrates the lock order requirements for the different locks of the
+// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
+// and should then be acquired from the top of the tree downwards, i.e. `total_consistency_lock`
+// first and the deepest lock on the branch last.
+// Note that locks on different branches shall not be taken at the same time, as doing so will
+// create a new lock order for those specific locks in the order they were taken.
+//
+// Lock order tree:
+//
+// `total_consistency_lock`
+// |
+// |__`forward_htlcs`
+// |
+// |__`pending_inbound_payments`
+// | |
+// | |__`claimable_htlcs`
+// | |
+// | |__`pending_outbound_payments`
+// | |
+// | |__`channel_state`
+// | |
+// | |__`id_to_peer`
+// | |
+// | |__`short_to_chan_info`
+// | |
+// | |__`per_peer_state`
+// | |
+// | |__`outbound_scid_aliases`
+// | |
+// | |__`best_block`
+// | |
+// | |__`pending_events`
+// | |
+// | |__`pending_background_events`
+//
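+// As an illustrative sketch (not code from this file), the tree above allows, e.g.:
+//
+//   let _a = self.pending_inbound_payments.lock().unwrap(); // ancestor node
+//   let _b = self.channel_state.lock().unwrap();            // descendant on the same branch
+//
+// whereas taking, say, `forward_htlcs` and then `channel_state` would couple two separate
+// branches and implicitly establish a new lock order which every other call site would then
+// have to honor.
+//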
+pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
default_configuration: UserConfig,
genesis_hash: BlockHash,
- fee_estimator: F,
+ fee_estimator: LowerBoundedFeeEstimator<F>,
chain_monitor: M,
tx_broadcaster: T,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(test)]
pub(super) best_block: RwLock<BestBlock>,
#[cfg(not(test))]
best_block: RwLock<BestBlock>,
secp_ctx: Secp256k1<secp256k1::All>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(any(test, feature = "_test_utils"))]
- pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
+ pub(super) channel_state: Mutex<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
#[cfg(not(any(test, feature = "_test_utils")))]
- channel_state: Mutex<ChannelHolder<Signer>>,
+ channel_state: Mutex<ChannelHolder<<K::Target as KeysInterface>::Signer>>,
/// Storage for PaymentSecrets and any requirements on future inbound payments before we will
/// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
/// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
/// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
- /// Locked *after* channel_state.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
/// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
///
/// See `PendingOutboundPayment` documentation for more info.
///
- /// Locked *after* channel_state.
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
+ /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
+ ///
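+ /// That is, an HTLC to be forwarded out over the channel with SCID (or alias) `scid` is queued
+ /// under key `scid`, while an HTLC for which we are the final destination sits under key `0`.
+ ///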
+ /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+ /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+ /// and via the classic SCID.
+ ///
+ /// Note that no consistency guarantees are made about the existence of a channel with the
+ /// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ #[cfg(test)]
+ pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+ #[cfg(not(test))]
+ forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+
+ /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+ /// failed/claimed by the user.
+ ///
+ /// Note that no consistency guarantees are made about the channels given here actually
+ /// existing anymore by the time you go to read them!
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+
/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
/// and some closed channels which reached a usable state prior to being closed. This is used
/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
/// active channel list on load.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
outbound_scid_aliases: Mutex<HashSet<u64>>,
+ /// `channel_id` -> `counterparty_node_id`.
+ ///
+ /// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
+ /// multiple channels with the same `temporary_channel_id` to different peers can exist,
+ /// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
+ ///
+ /// Note that this map should only be used for `MonitorEvent` handling, to be able to access
+ /// the corresponding channel for the event, as we only have access to the `channel_id` during
+ /// the handling of the events.
+ ///
+ /// TODO:
+ /// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
+ /// to make `counterparty_node_id` a required field in `ChannelMonitor`s, which unfortunately
+ /// would break backwards compatibility.
+ /// We should add `counterparty_node_id`s to `MonitorEvent`s and eventually rely on them, at
+ /// which point this map becomes redundant, as only the `ChannelManager::per_peer_state` is
+ /// required to access the channel with the `counterparty_node_id`.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,
+
+ /// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
+ ///
+ /// Outbound SCID aliases are added here once the channel is available for normal use, with
+ /// SCIDs being added once the funding transaction is confirmed at the channel's required
+ /// confirmation depth.
+ ///
+ /// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency
+ /// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a
+ /// channel with the `channel_id` in our other maps.
+ ///
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
+ #[cfg(test)]
+ pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+ #[cfg(not(test))]
+ short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
+
our_network_key: SecretKey,
our_network_pubkey: PublicKey,
/// [fake scids]: crate::util::scid_utils::fake_scid
fake_scid_rand_bytes: [u8; 32],
- /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
- /// value increases strictly since we don't assume access to a time source.
- last_node_announcement_serial: AtomicUsize,
+ /// When we send payment probes, we generate the [`PaymentHash`] based on this cookie secret
+ /// and a random [`PaymentId`]. This allows us to discern probes from real payments, without
+ /// keeping additional state.
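+ /// (As a sketch of the construction, not a normative description: something along the lines of
+ /// `payment_hash = SHA256(probing_cookie_secret || payment_id)`, recomputed when an HTLC fails
+ /// back to check whether it was one of our probes.)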
+ probing_cookie_secret: [u8; 32],
/// The highest block timestamp we've seen, which is usually a good guess at the current time.
/// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
/// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
/// new channel.
///
- /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_events: Mutex<Vec<events::Event>>,
+ /// See `ChannelManager` struct-level documentation for lock order requirements.
pending_background_events: Mutex<Vec<BackgroundEvent>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
/// Taken first everywhere where we are making changes before any other locks.
/// When acquiring this lock in read mode, rather than acquiring it directly, call
/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
- /// PersistenceNotifier the lock contains sends out a notification when the lock is released.
+ /// Notifier the lock contains sends out a notification when the lock is released.
total_consistency_lock: RwLock<()>,
- persistence_notifier: PersistenceNotifier,
+ persistence_notifier: Notifier,
keys_manager: K,
/// notify or not based on whether relevant changes have been made, providing a closure to
/// `optionally_notify` which returns a `NotifyOption`.
struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
- persistence_notifier: &'a PersistenceNotifier,
+ persistence_notifier: &'a Notifier,
should_persist: F,
// We hold onto this result so the lock doesn't get released immediately.
_read_guard: RwLockReadGuard<'a, ()>,
}
impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
- fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+ fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
}
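	// A typical use, as in `close_channel_internal` below: bind the guard at the top of a
	// state-changing method so that when it drops at the end of the call, the contained
	// `Notifier` wakes anyone waiting to re-persist the `ChannelManager`:
	//
	//   let _guard = PersistenceNotifierGuard::notify_on_drop(
	//       &self.total_consistency_lock, &self.persistence_notifier);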
- fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
+ fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
let read_guard = lock.read().unwrap();
PersistenceNotifierGuard {
// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
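// The value of 6*7 = 42 blocks just below corresponds to roughly seven hours at one block every
// ten minutes.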
pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
-pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
+// This should be long enough to allow a payment path drawn across multiple routing hops with
+// substantial `cltv_expiry_delta`s, as the size of each of those values is the reaction delay
+// offered to the corresponding routing node in case of HTLC on-chain settlement. While larger
+// values appear less competitive, a node operator could decide to scale them up to suit its
+// security policy. At the network level, we shouldn't constrain them too much, while still
+// avoiding the introduction of a DoS vector. Further, a low CLTV_FAR_FAR_AWAY could be a source
+// of routing failures for any HTLC sender picking up an LDK node among the first hops.
+// The value below works out to 14 days * 24 hours * 6 blocks/hour = 2016 blocks.
+pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
/// Minimum CLTV difference between the current block height and received inbound payments.
/// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
-/// The number of blocks before we consider an outbound payment for expiry if it doesn't have any
-/// pending HTLCs in flight.
-pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
-
/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time-out the
+/// idempotency of payments by [`PaymentId`]. See
+/// [`ChannelManager::remove_stale_resolved_payments`].
+pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+
/// Information needed for constructing an invoice route hint for this channel.
#[derive(Clone, Debug, PartialEq)]
pub struct CounterpartyForwardingInfo {
/// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
/// payments instead of this. See [`get_inbound_payment_scid`].
///
+ /// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may
+ /// be used in place of this in outbound routes. See [`get_outbound_payment_scid`].
+ ///
/// [`inbound_scid_alias`]: Self::inbound_scid_alias
+ /// [`outbound_scid_alias`]: Self::outbound_scid_alias
/// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
+ /// [`get_outbound_payment_scid`]: Self::get_outbound_payment_scid
+ /// [`confirmations_required`]: Self::confirmations_required
pub short_channel_id: Option<u64>,
+ /// An optional [`short_channel_id`] alias for this channel, randomly generated by us and
+ /// usable in place of [`short_channel_id`] to reference the channel in outbound routes when
+ /// the channel has not yet been confirmed (as long as [`confirmations_required`] is
+ /// `Some(0)`).
+ ///
+ /// This will be `None` as long as the channel is not available for routing outbound payments.
+ ///
+ /// [`short_channel_id`]: Self::short_channel_id
+ /// [`confirmations_required`]: Self::confirmations_required
+ pub outbound_scid_alias: Option<u64>,
/// An optional [`short_channel_id`] alias for this channel, randomly generated by our
/// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
/// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
pub force_close_spend_delay: Option<u16>,
/// True if the channel was initiated (and thus funded) by us.
pub is_outbound: bool,
- /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
- /// channel is not currently being shut down. `funding_locked` message exchange implies the
+ /// True if the channel is confirmed, channel_ready messages have been exchanged, and the
+ /// channel is not currently being shut down. `channel_ready` message exchange implies the
/// required confirmation count has been reached (and we were connected to the peer at some
/// point after the funding transaction received enough confirmations). The required
/// confirmation count is provided in [`confirmations_required`].
///
/// [`confirmations_required`]: ChannelDetails::confirmations_required
- pub is_funding_locked: bool,
- /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
+ pub is_channel_ready: bool,
+ /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
/// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
///
- /// This is a strict superset of `is_funding_locked`.
+ /// This is a strict superset of `is_channel_ready`.
pub is_usable: bool,
/// True if this channel is (or will be) publicly-announced.
pub is_public: bool,
pub inbound_htlc_minimum_msat: Option<u64>,
/// The largest value HTLC (in msat) we currently will accept, for this channel.
pub inbound_htlc_maximum_msat: Option<u64>,
+ /// Set of configurable parameters that affect channel operation.
+ ///
+ /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
+ pub config: Option<ChannelConfig>,
}
impl ChannelDetails {
pub fn get_inbound_payment_scid(&self) -> Option<u64> {
self.inbound_scid_alias.or(self.short_channel_id)
}
+
+ /// Gets the current SCID which should be used to identify this channel for outbound payments.
+ /// This should be used in [`Route`]s to describe the first hop or in other contexts where
+ /// we're sending or forwarding a payment outbound over this channel.
+ ///
+ /// This is either the [`ChannelDetails::short_channel_id`], if set, or the
+ /// [`ChannelDetails::outbound_scid_alias`]. See those for more information.
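+ ///
+ /// For example, a zero-conf channel whose funding transaction has not yet confirmed will
+ /// return its [`ChannelDetails::outbound_scid_alias`] here, switching to the real SCID once
+ /// the funding transaction confirms at the required depth.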
+ pub fn get_outbound_payment_scid(&self) -> Option<u64> {
+ self.short_channel_id.or(self.outbound_scid_alias)
+ }
}
/// If a payment fails to send, it can be in one of several states. This enum is returned as the
/// All paths which were attempted failed to send, with no channel state change taking place.
/// You can freely retry the payment in full (though you probably want to do so over different
/// paths than the ones selected).
+ ///
+ /// [`ChannelManager::abandon_payment`] does *not* need to be called for this payment and
+ /// [`ChannelManager::retry_payment`] will *not* work for this payment.
AllFailedRetrySafe(Vec<APIError>),
/// Some paths which were attempted failed to send, though possibly not all. At least some
/// paths have irrevocably committed to the HTLC and retrying the payment in full would result
/// in over-/re-payment.
///
/// The results here are ordered the same as the paths in the route object which was passed to
- /// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
- /// retried (though there is currently no API with which to do so).
+ /// send_payment, and any `Err`s which are not [`APIError::MonitorUpdateInProgress`] can be
+ /// safely retried via [`ChannelManager::retry_payment`].
///
- /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
- /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
- /// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel
- /// with the latest update_id.
+ /// Any entries which contain `Err(APIError::MonitorUpdateInProgress)` or `Ok(())` MUST NOT be
+ /// retried as they will result in over-/re-payment. These HTLCs all either successfully sent
+ /// (in the case of `Ok(())`) or will send once a [`MonitorEvent::Completed`] is provided for
+ /// the next-hop channel with the latest update_id.
PartialFailure {
/// The errors themselves, in the same order as the route hops.
results: Vec<Result<(), APIError>>,
}
macro_rules! update_maps_on_chan_removal {
- ($self: expr, $short_to_id: expr, $channel: expr) => {
+ ($self: expr, $channel: expr) => {{
+ $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+ let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
if let Some(short_id) = $channel.get_short_channel_id() {
- $short_to_id.remove(&short_id);
+ short_to_chan_info.remove(&short_id);
} else {
// If the channel was never confirmed on-chain prior to its closure, remove the
// outbound SCID alias we used for it from the collision-prevention set. While we
let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
debug_assert!(alias_removed);
}
- $short_to_id.remove(&$channel.outbound_scid_alias());
- }
+ short_to_chan_info.remove(&$channel.outbound_scid_alias());
+ }}
}
/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
macro_rules! convert_chan_err {
- ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+ ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
match $err {
ChannelError::Warn(msg) => {
(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
},
ChannelError::Close(msg) => {
log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
- update_maps_on_chan_removal!($self, $short_to_id, $channel);
+ update_maps_on_chan_removal!($self, $channel);
let shutdown_res = $channel.force_shutdown(true);
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
},
- ChannelError::CloseDelayBroadcast(msg) => {
- log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
- update_maps_on_chan_removal!($self, $short_to_id, $channel);
- let shutdown_res = $channel.force_shutdown(false);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
- shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
- }
}
}
}
macro_rules! break_chan_entry {
- ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
+ ($self: ident, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+ let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
if drop {
$entry.remove_entry();
}
}
macro_rules! try_chan_entry {
- ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
+ ($self: ident, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+ let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
if drop {
$entry.remove_entry();
}
}
macro_rules! remove_channel {
- ($self: expr, $channel_state: expr, $entry: expr) => {
+ ($self: expr, $entry: expr) => {
{
let channel = $entry.remove_entry().1;
- update_maps_on_chan_removal!($self, $channel_state.short_to_id, channel);
+ update_maps_on_chan_removal!($self, channel);
channel
}
}
}
-macro_rules! handle_monitor_err {
- ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
+macro_rules! handle_monitor_update_res {
+ ($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
match $err {
- ChannelMonitorUpdateErr::PermanentFailure => {
- log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
- update_maps_on_chan_removal!($self, $short_to_id, $chan);
+ ChannelMonitorUpdateStatus::PermanentFailure => {
+ log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
+ update_maps_on_chan_removal!($self, $chan);
// TODO: $failed_fails is dropped here, which will cause other channels to hit the
// chain in a confused state! We need to move them into the ChannelMonitor which
// will be responsible for failing backwards once things confirm on-chain.
// given up the preimage yet, so might as well just wait until the payment is
// retried, avoiding the on-chain fees.
let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
- $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
+ $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
(res, true)
},
- ChannelMonitorUpdateErr::TemporaryFailure => {
- log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
+ ChannelMonitorUpdateStatus::InProgress => {
+ log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
log_bytes!($chan_id[..]),
if $resend_commitment && $resend_raa {
match $action_type {
if !$resend_raa {
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
}
- $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
+ $chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
},
+ ChannelMonitorUpdateStatus::Completed => {
+ (Ok(()), false)
+ },
}
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
- let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
+ let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
if drop {
$entry.remove_entry();
}
res
} };
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+ handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
} };
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
+ handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new(), Vec::new())
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
+ handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
- handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new())
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+ handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
};
-}
-
-macro_rules! return_monitor_err {
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
+ ($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
- return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
- }
-}
-
-// Does not break in case of TemporaryFailure!
-macro_rules! maybe_break_monitor_err {
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
- match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
- (e, ChannelMonitorUpdateErr::PermanentFailure) => {
- break e;
- },
- (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
- }
- }
}
-macro_rules! send_funding_locked {
- ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $funding_locked_msg: expr) => {
- $pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+macro_rules! send_channel_ready {
+ ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
+ $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
node_id: $channel.get_counterparty_node_id(),
- msg: $funding_locked_msg,
+ msg: $channel_ready_msg,
});
- // Note that we may send a funding locked multiple times for a channel if we reconnect, so
+ // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
- let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id());
- assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(),
+ let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
+ let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
+ assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
if let Some(real_scid) = $channel.get_short_channel_id() {
- let scid_insert = $short_to_id.insert(real_scid, $channel.channel_id());
- assert!(scid_insert.is_none() || scid_insert.unwrap() == $channel.channel_id(),
+ let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
+ assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
}
+ }}
+}
+
+macro_rules! emit_channel_ready_event {
+ ($self: expr, $channel: expr) => {
+ if $channel.should_emit_channel_ready_event() {
+ {
+ let mut pending_events = $self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::ChannelReady {
+ channel_id: $channel.channel_id(),
+ user_channel_id: $channel.get_user_id(),
+ counterparty_node_id: $channel.get_counterparty_node_id(),
+ channel_type: $channel.get_channel_type().clone(),
+ });
+ }
+ $channel.set_channel_ready_event_emitted();
+ }
}
}
macro_rules! handle_chan_restoration_locked {
($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
$raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
- $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr, $announcement_sigs: expr) => { {
+ $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
let mut htlc_forwards = None;
let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
let res = loop {
let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
if !forwards.is_empty() {
- htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+ htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
$channel_entry.get().get_funding_txo().unwrap(), forwards));
}
if chanmon_update.is_some() {
- // On reconnect, we, by definition, only resend a funding_locked if there have been
+ // On reconnect, we, by definition, only resend a channel_ready if there have been
// no commitment updates, so the only channel monitor update which could also be
- // associated with a funding_locked would be the funding_created/funding_signed
+ // associated with a channel_ready would be the funding_created/funding_signed
// monitor update. That monitor update failing implies that we won't send
- // funding_locked until it's been updated, so we can't have a funding_locked and a
+ // channel_ready until it's been updated, so we can't have a channel_ready and a
// monitor update here (so we don't bother to handle it correctly below).
- assert!($funding_locked.is_none());
- // A channel monitor update makes no sense without either a funding_locked or a
- // commitment update to process after it. Since we can't have a funding_locked, we
+ assert!($channel_ready.is_none());
+ // A channel monitor update makes no sense without either a channel_ready or a
+ // commitment update to process after it. Since we can't have a channel_ready, we
// only bother to handle the monitor-update + commitment_update case below.
assert!($commitment_update.is_some());
}
- if let Some(msg) = $funding_locked {
- // Similar to the above, this implies that we're letting the funding_locked fly
+ if let Some(msg) = $channel_ready {
+ // Similar to the above, this implies that we're letting the channel_ready fly
// before it should be allowed to.
assert!(chanmon_update.is_none());
- send_funding_locked!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
+ send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
}
if let Some(msg) = $announcement_sigs {
$channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
});
}
+ emit_channel_ready_event!($self, $channel_entry.get_mut());
+
let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
if let Some(monitor_update) = chanmon_update {
// We only ever broadcast a funding transaction in response to a funding_signed
// only case where we can get a new ChannelMonitorUpdate would be if we also
// have some commitment updates to send as well.
assert!($commitment_update.is_some());
- if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- let mut order = $order;
- if $raa.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
+ match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ // channel_reestablish doesn't guarantee the order it returns is sensical
+ // for the messages it returns, but if we're setting what messages to
+ // re-transmit on monitor update success, we need to make sure it is sane.
+ let mut order = $order;
+ if $raa.is_none() {
+ order = RAACommitmentOrder::CommitmentFirst;
+ }
+ break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
}
- break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
}
}
} }
}
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
///
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
///
- /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
- ///
/// Users need to notify the new ChannelManager when a new block is connected or
/// disconnected using its `block_connected` and `block_disconnected` methods, starting
/// from after `params.latest_hash`.
ChannelManager {
default_configuration: config.clone(),
genesis_hash: genesis_block(params.network).header.block_hash(),
- fee_estimator: fee_est,
+ fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
chain_monitor,
tx_broadcaster,
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
- short_to_id: HashMap::new(),
- forward_htlcs: HashMap::new(),
- claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
}),
outbound_scid_aliases: Mutex::new(HashSet::new()),
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: Mutex::new(HashMap::new()),
+ forward_htlcs: Mutex::new(HashMap::new()),
+ claimable_htlcs: Mutex::new(HashMap::new()),
+ id_to_peer: Mutex::new(HashMap::new()),
+ short_to_chan_info: FairRwLock::new(HashMap::new()),
our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()),
inbound_payment_key: expanded_inbound_key,
fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(),
- last_node_announcement_serial: AtomicUsize::new(0),
+ probing_cookie_secret: keys_manager.get_secure_random_bytes(),
+
highest_seen_timestamp: AtomicUsize::new(0),
per_peer_state: RwLock::new(HashMap::new()),
pending_events: Mutex::new(Vec::new()),
pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
- persistence_notifier: PersistenceNotifier::new(),
+ persistence_notifier: Notifier::new(),
keys_manager,
}
}
- /// Gets the current configuration applied to all new channels, as
+ /// Gets the current configuration applied to all new channels.
pub fn get_current_default_configuration(&self) -> &UserConfig {
&self.default_configuration
}
Ok(temporary_channel_id)
}
- fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<Signer>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
+ fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<K::Target as KeysInterface>::Signer>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
let mut res = Vec::new();
{
let channel_state = self.channel_state.lock().unwrap();
// `have_received_message` indicates that type negotiation has completed.
channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
short_channel_id: channel.get_short_channel_id(),
+ outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
inbound_scid_alias: channel.latest_inbound_scid_alias(),
channel_value_satoshis: channel.get_value_satoshis(),
unspendable_punishment_reserve: to_self_reserve_satoshis,
confirmations_required: channel.minimum_depth(),
force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
is_outbound: channel.is_outbound(),
- is_funding_locked: channel.is_usable(),
+ is_channel_ready: channel.is_usable(),
is_usable: channel.is_live(),
is_public: channel.should_announce(),
inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
- inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat()
+ inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
+ config: Some(channel.config()),
});
}
}
self.list_channels_with_filter(|_| true)
}
- /// Gets the list of usable channels, in random order. Useful as an argument to
- /// get_route to ensure non-announced channels are used.
+ /// Gets the list of usable channels, in random order. Useful as an argument to [`find_route`]
+ /// to ensure non-announced channels are used.
///
/// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
/// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
/// are.
+ ///
+ /// [`find_route`]: crate::routing::router::find_route
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
// Note we use `is_live` here instead of `usable`, which leads to somewhat confused
// internal/external nomenclature, but that's OK because that's probably what the user
}
/// Helper function that issues the channel close events
- fn issue_channel_close_events(&self, channel: &Channel<Signer>, closure_reason: ClosureReason) {
+ fn issue_channel_close_events(&self, channel: &Channel<<K::Target as KeysInterface>::Signer>, closure_reason: ClosureReason) {
let mut pending_events_lock = self.pending_events.lock().unwrap();
match channel.unbroadcasted_funding() {
Some(transaction) => {
});
}
- fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
+ fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let counterparty_node_id;
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- counterparty_node_id = chan_entry.get().get_counterparty_node_id();
- let per_peer_state = self.per_peer_state.read().unwrap();
- let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
- Some(peer_state) => {
- let peer_state = peer_state.lock().unwrap();
- let their_features = &peer_state.latest_features;
- chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
- },
- None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ if *counterparty_node_id != chan_entry.get().get_counterparty_node_id() {
+ return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
+ }
+ let (shutdown_msg, monitor_update, htlcs) = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ match per_peer_state.get(&counterparty_node_id) {
+ Some(peer_state) => {
+ let peer_state = peer_state.lock().unwrap();
+ let their_features = &peer_state.latest_features;
+ chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+ },
+ None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+ }
};
failed_htlcs = htlcs;
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update {
- if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
- let (result, is_permanent) =
- handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, channel_state, chan_entry);
- break result;
- }
+ let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+ let (result, is_permanent) =
+ handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ if is_permanent {
+ remove_channel!(self, chan_entry);
+ break result;
}
}
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id,
+ node_id: *counterparty_node_id,
msg: shutdown_msg
});
if chan_entry.get().is_shutdown() {
- let channel = remove_channel!(self, channel_state, chan_entry);
+ let channel = remove_channel!(self, chan_entry);
if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: channel_update
};
for htlc_source in failed_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
+ self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
- let _ = handle_error!(self, result, counterparty_node_id);
+ let _ = handle_error!(self, result, *counterparty_node_id);
Ok(())
}
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
- pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
- self.close_channel_internal(channel_id, None)
+ pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, counterparty_node_id, None)
}
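+ // A minimal usage sketch of the updated API (assumes a `channel_manager` in scope
+ // and a channel previously opened to a peer; `list_channels` is used here only to
+ // recover the ids the new signature requires):
+ //
+ //     let chan = &channel_manager.list_channels()[0];
+ //     channel_manager.close_channel(&chan.channel_id, &chan.counterparty.node_id)
+ //         .expect("channel must be known and its counterparty id must match");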
/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
/// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
/// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
- pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
- self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight))
+ pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
+ self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
}
#[inline]
let (monitor_update_option, mut failed_htlcs) = shutdown_res;
log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+ self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
if let Some((funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
}
}
- /// `peer_node_id` should be set when we receive a message from a peer, but not set when the
+ /// `peer_msg` should be set when we receive a message from a peer, but not set when the
/// user closes, which will be re-exposed as the `ChannelClosed` reason.
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+ -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
- if let Some(node_id) = peer_node_id {
- if chan.get().get_counterparty_node_id() != *node_id {
- return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
- }
+ if chan.get().get_counterparty_node_id() != *peer_node_id {
+ return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
- if peer_node_id.is_some() {
- if let Some(peer_msg) = peer_msg {
- self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
- }
+ if let Some(peer_msg) = peer_msg {
+ self.issue_channel_close_events(chan.get(), ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
} else {
self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
}
- remove_channel!(self, channel_state, chan)
+ remove_channel!(self, chan)
} else {
return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
};
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
- self.finish_force_close_channel(chan.force_shutdown(true));
+ self.finish_force_close_channel(chan.force_shutdown(broadcast));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
Ok(chan.get_counterparty_node_id())
}
- /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
- /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
- pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- match self.force_close_channel_with_peer(channel_id, None, None) {
+ match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
}
}
+ /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+ /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
+ /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
+ /// channel.
+ pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+ -> Result<(), APIError> {
+ self.force_close_sending_error(channel_id, counterparty_node_id, true)
+ }
+
+ /// Force closes a channel, rejecting new HTLCs on the given channel but skipping broadcast of
+ /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
+ /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+ ///
+ /// You can always get the latest local transaction(s) to broadcast from
+ /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+ pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+ -> Result<(), APIError> {
+ self.force_close_sending_error(channel_id, counterparty_node_id, false)
+ }
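+ // Sketch of choosing between the two force-close variants (same assumed
+ // `channel_manager`, `channel_id` and `counterparty_node_id` as above). Skipping
+ // the broadcast is useful e.g. when the local state is suspected to be stale and
+ // broadcasting it could be unsafe:
+ //
+ //     channel_manager.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id)?;
+ //     // ...or, keeping the commitment transaction(s) unbroadcast for manual handling:
+ //     channel_manager.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id)?;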
+
/// Force close all channels, immediately broadcasting the latest local commitment transaction
/// for each to the chain and rejecting new HTLCs on each.
- pub fn force_close_all_channels(&self) {
+ pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+ for chan in self.list_channels() {
+ let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+ }
+ }
+
+ /// Force close all channels, rejecting new HTLCs on each but without broadcasting the latest
+ /// local transaction(s).
+ pub fn force_close_all_channels_without_broadcasting_txn(&self) {
for chan in self.list_channels() {
- let _ = self.force_close_channel(&chan.channel_id);
+ let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
}
}
})
}
- fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<Signer>>) {
+ fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
macro_rules! return_malformed_err {
($msg: expr, $err_code: expr) => {
{
log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
failure_code: $err_code,
- })), self.channel_state.lock().unwrap());
+ }));
}
}
}
return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
}
- let shared_secret = {
- let mut arr = [0; 32];
- arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
- arr
- };
+ let shared_secret = SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key).secret_bytes();
if msg.onion_routing_packet.version != 0 {
//TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
//node knows the HMAC matched, so they already know what is there...
return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
}
-
- let mut channel_state = None;
macro_rules! return_err {
($msg: expr, $err_code: expr, $data: expr) => {
{
log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- if channel_state.is_none() {
- channel_state = Some(self.channel_state.lock().unwrap());
- }
- return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
- })), channel_state.unwrap());
+ }));
}
}
}
- let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+ let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
return_malformed_err!(err_msg, err_code);
}
},
onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
- let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
-
- let blinding_factor = {
- let mut sha = Sha256::engine();
- sha.input(&new_pubkey.serialize()[..]);
- sha.input(&shared_secret);
- Sha256::from_engine(sha).into_inner()
- };
-
- let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
- Err(e)
- } else { Ok(new_pubkey) };
-
+ let new_pubkey = msg.onion_routing_packet.public_key.unwrap();
let outgoing_packet = msgs::OnionPacket {
version: 0,
- public_key,
+ public_key: onion_utils::next_hop_packet_pubkey(&self.secp_ctx, new_pubkey, &shared_secret),
hop_data: new_packet_bytes,
hmac: next_hop_hmac.clone(),
};
}
};
- channel_state = Some(self.channel_state.lock().unwrap());
if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
// If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
// with a short_channel_id of 0. This is important as various things later assume
// short_channel_id is non-0 in any ::Forward.
if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
- let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
if let Some((err, code, chan_update)) = loop {
+ let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
+ let mut channel_state = self.channel_state.lock().unwrap();
let forwarding_id_opt = match id_option {
None => { // unknown_next_peer
// Note that this is likely a timing oracle for detecting whether an scid is a
break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
}
},
- Some(id) => Some(id.clone()),
+ Some((_cp_id, chan_id)) => Some(chan_id.clone()),
};
- let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt {
- let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+ let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt {
+ let chan = match channel_state.by_id.get_mut(&forwarding_id) {
+ None => {
+ // Channel was removed. The short_to_chan_info and by_id maps have
+ // no consistency guarantees.
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ },
+ Some(chan) => chan
+ };
if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
// Note that the behavior here should be identical to the above block - we
// should NOT reveal the existence or non-existence of a private channel if
if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
}
- let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64)
- .and_then(|prop_fee| { (prop_fee / 1000000)
- .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) });
- if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
- break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, chan_update_opt));
+ if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *amt_to_forward, *outgoing_cltv_value) {
+ break Some((err, code, chan_update_opt));
+ }
+ chan_update_opt
+ } else {
+ if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+ break Some((
+ "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+ 0x1000 | 13, None,
+ ));
}
- (chan_update_opt, chan.get_cltv_expiry_delta())
- } else { (None, MIN_CLTV_EXPIRY_DELTA) };
+ None
+ };
- if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + forwardee_cltv_expiry_delta as u64 { // incorrect_cltv_expiry
- break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, chan_update_opt));
- }
let cur_height = self.best_block.read().unwrap().height() + 1;
// Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
// but we want to be robust wrt to counterparty packet sanitization (see
break None;
}
{
- let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 8 + 2));
+ let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
if let Some(chan_update) = chan_update {
if code == 0x1000 | 11 || code == 0x1000 | 12 {
msg.amount_msat.write(&mut res).expect("Writes cannot fail");
msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
}
else if code == 0x1000 | 20 {
- // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791
+ // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
0u16.write(&mut res).expect("Writes cannot fail");
}
- (chan_update.serialized_length() as u16).write(&mut res).expect("Writes cannot fail");
+ (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+ msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
chan_update.write(&mut res).expect("Writes cannot fail");
}
return_err!(err, code, &res.0[..]);
}
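+ // The failure data produced above is thus laid out as (sketch):
+ //
+ //     [code-specific fields, e.g. amount_msat or cltv_expiry]
+ //     || ((chan_update.serialized_length() + 2) as u16)  // length, now covering the type prefix
+ //     || msgs::ChannelUpdate::TYPE                       // the newly-prepended message type (u16)
+ //     || chan_update                                     // the serialized channel_update itself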
}
- (pending_forward_info, channel_state.unwrap())
+ pending_forward_info
}
/// Gets the current channel_update for the given channel. This first checks if the channel is
/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
///
/// May be called with channel_state already locked!
- fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ fn get_channel_update_for_broadcast(&self, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
if !chan.should_announce() {
return Err(LightningError {
err: "Cannot broadcast a channel_update for a private channel".to_owned(),
action: msgs::ErrorAction::IgnoreError
});
}
+ if chan.get_short_channel_id().is_none() {
+ return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
+ }
log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
self.get_channel_update_for_unicast(chan)
}
/// and thus MUST NOT be called unless the recipient of the resulting message has already
/// provided evidence that they know about the existence of the channel.
/// May be called with channel_state already locked!
- fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ fn get_channel_update_for_unicast(&self, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
- let short_channel_id = match chan.get_short_channel_id() {
+ let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
};
self.get_channel_update_for_onion(short_channel_id, chan)
}
- fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
cltv_expiry_delta: chan.get_cltv_expiry_delta(),
htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
- htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
+ htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
fee_proportional_millionths: chan.get_fee_proportional_millionths(),
excess_data: Vec::new(),
};
let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
- let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
+ let sig = self.secp_ctx.sign_ecdsa(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
Ok(msgs::ChannelUpdate {
signature: sig,
}
// Only public for testing; this should otherwise never be called directly
- pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
+ pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
let prng_seed = self.keys_manager.get_secure_random_bytes();
- let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let err: Result<(), _> = loop {
- let mut channel_lock = self.channel_state.lock().unwrap();
-
- let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
- let payment_entry = pending_outbounds.entry(payment_id);
- if let hash_map::Entry::Occupied(payment) = &payment_entry {
- if !payment.get().is_retryable() {
- return Err(APIError::RouteError {
- err: "Payment already completed"
- });
- }
- }
-
- let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
+ let id = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
- Some(id) => id.clone(),
+ Some((_cp_id, chan_id)) => chan_id.clone(),
};
- macro_rules! insert_outbound_payment {
- () => {
- let payment = payment_entry.or_insert_with(|| PendingOutboundPayment::Retryable {
- session_privs: HashSet::new(),
- pending_amt_msat: 0,
- pending_fee_msat: Some(0),
- payment_hash: *payment_hash,
- payment_secret: *payment_secret,
- starting_block_height: self.best_block.read().unwrap().height(),
- total_msat: total_value,
- });
- assert!(payment.insert(session_priv_bytes, path));
- }
- }
-
+ let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
match {
payment_secret: payment_secret.clone(),
payment_params: payment_params.clone(),
}, onion_packet, &self.logger),
- channel_state, chan)
+ chan)
} {
Some((update_add, commitment_signed, monitor_update)) => {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
- // Note that MonitorUpdateFailed here indicates (per function docs)
- // that we will resend the commitment update once monitor updating
- // is restored. Therefore, we must return an error indicating that
- // it is unsafe to retry the payment wholesale, which we do in the
- // send_payment check for MonitorUpdateFailed, below.
- insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
- return Err(APIError::MonitorUpdateFailed);
+ let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+ let chan_id = chan.get().channel_id();
+ match (update_err,
+ handle_monitor_update_res!(self, update_err, chan,
+ RAACommitmentOrder::CommitmentFirst, false, true))
+ {
+ (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
+ (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
+ (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
+ // Note that MonitorUpdateInProgress here indicates (per function
+ // docs) that we will resend the commitment update once monitor
+ // updating completes. Therefore, we must return an error
+ // indicating that it is unsafe to retry the payment wholesale,
+ // which we do in the send_payment check for
+ // MonitorUpdateInProgress, below.
+ return Err(APIError::MonitorUpdateInProgress);
+ },
+ _ => unreachable!(),
}
- insert_outbound_payment!();
- log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
+ log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: path.first().unwrap().pubkey,
updates: msgs::CommitmentUpdate {
},
});
},
- None => { insert_outbound_payment!(); },
+ None => { },
}
- } else { unreachable!(); }
+ } else {
+ // The channel was likely removed after we fetched the id from the
+ // `short_to_chan_info` map, but before we successfully locked the `by_id` map.
+ // This can occur as no consistency guarantees exist between the two maps.
+ return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
+ }
return Ok(());
};
/// Value parameters are provided via the last hop in route, see documentation for RouteHop
/// fields for more info.
///
- /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
- /// payment), we don't do anything to stop you! We always try to ensure that if the provided
- /// next hop knows the preimage to payment_hash they can claim an additional amount as
- /// specified in the last hop in the route! Thus, you should probably do your own
- /// payment_preimage tracking (which you should already be doing as they represent "proof of
- /// payment") and prevent double-sends yourself.
+ /// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
+ /// method will error with an [`APIError::RouteError`]. Note, however, that once a payment
+ /// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
+ /// [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the same
+ /// [`PaymentId`].
+ ///
+ /// Thus, in order to ensure duplicate payments are not sent, you should implement your own
+ /// tracking of payments, including state to indicate once a payment has completed. Because you
+ /// should also ensure that [`PaymentHash`]es are not re-used, for simplicity, you should
+ /// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the
+ /// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes.
///
- /// May generate SendHTLCs message(s) event on success, which should be relayed.
+ /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via
+ /// [`PeerManager::process_events`]).
///
/// Each path may have a different return value, and [`PaymentSendFailure`] may contain a Vec
/// with each entry matching the corresponding-index entry in the route paths; see
/// [`PaymentSendFailure`] for more info.
///
/// In general, a path may raise:
- /// * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee,
+ /// * [`APIError::RouteError`] when an invalid route or forwarding parameter (cltv_delta, fee,
/// node public key) is specified.
- /// * APIError::ChannelUnavailable if the next-hop channel is not available for updates
+ /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
/// (including due to previous monitor update failure or new permanent monitor update
/// failure).
- /// * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+ /// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
/// relevant updates.
///
/// Note that depending on the type of the PaymentSendFailure the HTLC may have been
/// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route
/// must not contain multiple paths as multi-path payments require a recipient-provided
/// payment_secret.
+ ///
/// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
/// bit set (either as required or as available). If multiple paths are present in the Route,
/// we assume the invoice had the basic_mpp feature set.
- pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<PaymentId, PaymentSendFailure> {
- self.send_payment_internal(route, payment_hash, payment_secret, None, None, None)
+ ///
+ /// [`Event::PaymentSent`]: events::Event::PaymentSent
+ /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+ pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
+ let onion_session_privs = self.add_new_pending_payment(payment_hash, *payment_secret, payment_id, route)?;
+ self.send_payment_internal(route, payment_hash, payment_secret, None, payment_id, None, onion_session_privs)
+ }
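+ // Sketch of the idempotency pattern described above (assumes a valid `route`,
+ // `payment_hash` and `payment_secret` taken from an invoice). Re-using the
+ // `PaymentHash` bytes as the `PaymentId` makes a duplicate call fail with
+ // `PaymentSendFailure::ParameterError` while the first attempt is pending:
+ //
+ //     let payment_id = PaymentId(payment_hash.0);
+ //     channel_manager.send_payment(&route, payment_hash, &Some(payment_secret), payment_id)?;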
+
+ #[cfg(test)]
+ pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
+ self.add_new_pending_payment(payment_hash, payment_secret, payment_id, route)
+ }
+
+ fn add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
+ let mut onion_session_privs = Vec::with_capacity(route.paths.len());
+ for _ in 0..route.paths.len() {
+ onion_session_privs.push(self.keys_manager.get_secure_random_bytes());
+ }
+
+ let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+ match pending_outbounds.entry(payment_id) {
+ hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::ParameterError(APIError::RouteError {
+ err: "Payment already in progress"
+ })),
+ hash_map::Entry::Vacant(entry) => {
+ let payment = entry.insert(PendingOutboundPayment::Retryable {
+ session_privs: HashSet::new(),
+ pending_amt_msat: 0,
+ pending_fee_msat: Some(0),
+ payment_hash,
+ payment_secret,
+ starting_block_height: self.best_block.read().unwrap().height(),
+ total_msat: route.get_total_amount(),
+ });
+
+ for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+ assert!(payment.insert(*session_priv_bytes, path));
+ }
+
+ Ok(onion_session_privs)
+ },
+ }
}
- fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: Option<PaymentId>, recv_value_msat: Option<u64>) -> Result<PaymentId, PaymentSendFailure> {
+ fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
if route.paths.len() < 1 {
return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
}
- if route.paths.len() > 10 {
- // This limit is completely arbitrary - there aren't any real fundamental path-count
- // limits. After we support retrying individual paths we should likely bump this, but
- // for now more than 10 paths likely carries too much one-path failure.
- return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
- }
if payment_secret.is_none() && route.paths.len() > 1 {
return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
}
let mut total_value = 0;
let our_node_id = self.get_our_node_id();
let mut path_errs = Vec::with_capacity(route.paths.len());
- let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) };
'path_check: for path in route.paths.iter() {
if path.len() < 1 || path.len() > 20 {
path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
let cur_height = self.best_block.read().unwrap().height() + 1;
let mut results = Vec::new();
- for path in route.paths.iter() {
- results.push(self.send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage));
+ debug_assert_eq!(route.paths.len(), onion_session_privs.len());
+ for (path, session_priv) in route.paths.iter().zip(onion_session_privs.into_iter()) {
+ let mut path_res = self.send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage, session_priv);
+ match path_res {
+ Ok(_) => {},
+ Err(APIError::MonitorUpdateInProgress) => {
+ // While a MonitorUpdateInProgress is an Err(_), the payment is still
+ // considered "in flight" and we shouldn't remove it from the
+ // PendingOutboundPayment set.
+ },
+ Err(_) => {
+ let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+ if let Some(payment) = pending_outbounds.get_mut(&payment_id) {
+ let removed = payment.remove(&session_priv, Some(path));
+ debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers");
+ } else {
+ debug_assert!(false, "This can't happen as the payment was added by callers");
+ path_res = Err(APIError::APIMisuseError { err: "Internal error: payment disappeared during processing. Please report this bug!".to_owned() });
+ }
+ }
+ }
+ results.push(path_res);
}
let mut has_ok = false;
let mut has_err = false;
for (res, path) in results.iter().zip(route.paths.iter()) {
if res.is_ok() { has_ok = true; }
if res.is_err() { has_err = true; }
- if let &Err(APIError::MonitorUpdateFailed) = res {
- // MonitorUpdateFailed is inherently unsafe to retry, so we call it a
+ if let &Err(APIError::MonitorUpdateInProgress) = res {
+ // MonitorUpdateInProgress is inherently unsafe to retry, so we call it a
// PartialFailure.
has_err = true;
has_ok = true;
} else { None },
})
} else if has_err {
- // If we failed to send any paths, we shouldn't have inserted the new PaymentId into
- // our `pending_outbound_payments` map at all.
- debug_assert!(self.pending_outbound_payments.lock().unwrap().get(&payment_id).is_none());
+ // If we failed to send any paths, we should remove the new PaymentId from the
+ // `pending_outbound_payments` map, as the user isn't expected to `abandon_payment`.
+ let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some();
+ debug_assert!(removed, "We should always have a pending payment to remove here");
Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
} else {
- Ok(payment_id)
+ Ok(())
}
}
}
}
+ let mut onion_session_privs = Vec::with_capacity(route.paths.len());
+ for _ in 0..route.paths.len() {
+ onion_session_privs.push(self.keys_manager.get_secure_random_bytes());
+ }
+
let (total_msat, payment_hash, payment_secret) = {
- let outbounds = self.pending_outbound_payments.lock().unwrap();
- if let Some(payment) = outbounds.get(&payment_id) {
- match payment {
- PendingOutboundPayment::Retryable {
- total_msat, payment_hash, payment_secret, pending_amt_msat, ..
- } => {
- let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
- if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+ let mut outbounds = self.pending_outbound_payments.lock().unwrap();
+ match outbounds.get_mut(&payment_id) {
+ Some(payment) => {
+ let res = match payment {
+ PendingOutboundPayment::Retryable {
+ total_msat, payment_hash, payment_secret, pending_amt_msat, ..
+ } => {
+ let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
+ if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
+ }))
+ }
+ (*total_msat, *payment_hash, *payment_secret)
+ },
+ PendingOutboundPayment::Legacy { .. } => {
return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
+ err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
}))
- }
- (*total_msat, *payment_hash, *payment_secret)
- },
- PendingOutboundPayment::Legacy { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
- }))
- },
- PendingOutboundPayment::Fulfilled { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Payment already completed".to_owned()
- }));
- },
- PendingOutboundPayment::Abandoned { .. } => {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: "Payment already abandoned (with some HTLCs still pending)".to_owned()
- }));
- },
- }
- } else {
- return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
- err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
- }))
+ },
+ PendingOutboundPayment::Fulfilled { .. } => {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "Payment already completed".to_owned()
+ }));
+ },
+ PendingOutboundPayment::Abandoned { .. } => {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "Payment already abandoned (with some HTLCs still pending)".to_owned()
+ }));
+ },
+ };
+ for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+ assert!(payment.insert(*session_priv_bytes, path));
+ }
+ res
+ },
+ None =>
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
+ })),
}
};
- return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ())
+ self.send_payment_internal(route, payment_hash, &payment_secret, None, payment_id, Some(total_msat), onion_session_privs)
}
/// Signals that no further retries for the given payment will occur.
/// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
/// never reach the recipient.
///
- /// See [`send_payment`] documentation for more details on the return value of this function.
+ /// See [`send_payment`] documentation for more details on the return value of this function
+ /// and idempotency guarantees provided by the [`PaymentId`] key.
///
/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
/// [`send_payment`] for more information about the risks of duplicate preimage usage.
/// Note that `route` must have exactly one path.
///
/// [`send_payment`]: Self::send_payment
- pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+ pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
let preimage = match payment_preimage {
Some(p) => p,
None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
};
let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
- match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) {
- Ok(payment_id) => Ok((payment_hash, payment_id)),
+ let onion_session_privs = self.add_new_pending_payment(payment_hash, None, payment_id, &route)?;
+
+ match self.send_payment_internal(route, payment_hash, &None, Some(preimage), payment_id, None, onion_session_privs) {
+ Ok(()) => Ok(payment_hash),
+ Err(e) => Err(e)
+ }
+ }
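+ // Keysend usage sketch (assumes a single-path `route` to the recipient; passing
+ // `None` lets LDK draw a fresh preimage from the `KeysInterface`, and the
+ // `PaymentId` is any unique 32 bytes the caller tracks, per the docs above):
+ //
+ //     let payment_id = PaymentId([42; 32]); // hypothetical id chosen by the caller
+ //     let payment_hash = channel_manager.send_spontaneous_payment(&route, None, payment_id)?;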
+
+ /// Send a payment that is probing the given route for liquidity. We calculate the
+ /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
+ /// us to easily discern them from real payments.
+ pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
+ let payment_id = PaymentId(self.keys_manager.get_secure_random_bytes());
+
+ let payment_hash = self.probing_cookie_from_id(&payment_id);
+
+ if hops.len() < 2 {
+ return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
+ err: "No need probing a path with less than two hops".to_string()
+ }))
+ }
+
+ let route = Route { paths: vec![hops], payment_params: None };
+ let onion_session_privs = self.add_new_pending_payment(payment_hash, None, payment_id, &route)?;
+
+ match self.send_payment_internal(&route, payment_hash, &None, None, payment_id, None, onion_session_privs) {
+ Ok(()) => Ok((payment_hash, payment_id)),
Err(e) => Err(e)
}
}
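+ // Probing usage sketch (assumes a `route` with at least two hops, as required
+ // above; the returned hash/id pair identifies the probe in later events):
+ //
+ //     let (probe_hash, probe_id) = channel_manager.send_probe(route.paths[0].clone())?;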
+ /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
+ /// payment probe.
+ pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
+ let target_payment_hash = self.probing_cookie_from_id(payment_id);
+ target_payment_hash == *payment_hash
+ }
+
+ /// Returns the 'probing cookie' for the given [`PaymentId`].
+ fn probing_cookie_from_id(&self, payment_id: &PaymentId) -> PaymentHash {
+ let mut preimage = [0u8; 64];
+ preimage[..32].copy_from_slice(&self.probing_cookie_secret);
+ preimage[32..].copy_from_slice(&payment_id.0);
+ PaymentHash(Sha256::hash(&preimage).into_inner())
+ }
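+ // In other words, a probe's "payment hash" is fully deterministic in its id:
+ //
+ //     cookie = SHA256(probing_cookie_secret || payment_id.0)
+ //
+ // which lets `payment_is_probe` recompute and compare the hash from the
+ // `PaymentId` alone, with no per-probe state beyond the pending payment entry.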
+
/// Handles the generation of a funding transaction, optionally (for tests) with a function
/// which checks the correctness of the funding transaction given the associated channel.
- fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>
- (&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, find_funding_output: FundingOutput) -> Result<(), APIError> {
+ fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<K::Target as KeysInterface>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+ &self, temporary_channel_id: &[u8; 32], _counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
+ ) -> Result<(), APIError> {
let (chan, msg) = {
let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
Some(mut chan) => {
panic!("Generated duplicate funding txid?");
},
hash_map::Entry::Vacant(e) => {
+ let mut id_to_peer = self.id_to_peer.lock().unwrap();
+ if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+ panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
+ }
e.insert(chan);
}
}
}
#[cfg(test)]
- pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
- self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |_, tx| {
+ pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
+ self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
Ok(OutPoint { txid: tx.txid(), index: output_index })
})
}
/// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs
/// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
///
+ /// Returns [`APIError::APIMisuseError`] if the funding transaction is not final for propagation
+ /// across the p2p network.
+ ///
/// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
/// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
///
/// not currently support replacing a funding transaction on an existing channel. Instead,
/// create a new channel with a conflicting funding transaction.
///
+ /// Note that, to keep miner incentives aligned with moving the blockchain forward, we recommend
+ /// that the wallet software generating the funding transaction apply anti-fee-sniping as
+ /// implemented by the Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/>
+ /// for more details.
+ ///
/// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
/// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed
- pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
+ pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
for inp in funding_transaction.input.iter() {
});
}
}
- self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |chan, tx| {
+ {
+ let height = self.best_block.read().unwrap().height();
+ // Transactions are evaluated as final by network mempools at the next block. However, the modules
+ // constituting our Lightning node might not have perfectly synchronized blockchain views. Thus, if
+ // the wallet module is ahead of the LDK view, allow one more block of headroom.
+ if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
+ return Err(APIError::APIMisuseError {
+ err: "Funding transaction absolute timelock is non-final".to_owned()
+ });
+ }
+ }
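+ // Worked example: at a best-block height of 100, a funding transaction with a
+ // block-height lock_time of 103 (and at least one non-MAX sequence) is rejected
+ // here, while lock_time 102 passes thanks to the extra block of headroom.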
+ self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
let mut output_index = None;
let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
for (idx, outp) in tx.output.iter().enumerate() {
})
}
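+ // Usage sketch (assumes a wallet-built `funding_tx` paying the output script
+ // from a prior `Event::FundingGenerationReady` for this `temporary_channel_id`):
+ //
+ //     channel_manager.funding_transaction_generated(
+ //         &temporary_channel_id, &counterparty_node_id, funding_tx)?;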
- #[allow(dead_code)]
- // Messages of up to 64KB should never end up more than half full with addresses, as that would
- // be absurd. We ensure this by checking that at least 500 (our stated public contract on when
- // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
- // message...
- const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
- #[deny(const_err)]
- #[allow(dead_code)]
- // ...by failing to compile if the number of addresses that would be half of a message is
- // smaller than 500:
- const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;
-
- /// Regenerates channel_announcements and generates a signed node_announcement from the given
- /// arguments, providing them in corresponding events via
- /// [`get_and_clear_pending_msg_events`], if at least one public channel has been confirmed
- /// on-chain. This effectively re-broadcasts all channel announcements and sends our node
- /// announcement to ensure that the lightning P2P network is aware of the channels we have and
- /// our network addresses.
+ /// Atomically updates the [`ChannelConfig`] for the given channels.
///
- /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this
- /// node to humans. They carry no in-protocol meaning.
+ /// Once the updates are applied, each eligible channel (advertised with a known short channel
+ /// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
+ /// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
+ /// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
///
- /// `addresses` represent the set (possibly empty) of socket addresses on which this node
- /// accepts incoming connections. These will be included in the node_announcement, publicly
- /// tying these addresses together and to this node. If you wish to preserve user privacy,
- /// addresses should likely contain only Tor Onion addresses.
+ /// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
+ /// `counterparty_node_id` is provided.
///
- /// Panics if `addresses` is absurdly large (more than 500).
+ /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
+ /// below [`MIN_CLTV_EXPIRY_DELTA`].
///
- /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
- pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-
- if addresses.len() > 500 {
- panic!("More than half the message size was taken up by public addresses!");
+ /// If an error is returned, none of the updates should be considered applied.
+ ///
+ /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
+ /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
+ /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
+ /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
+ /// [`ChannelUpdate`]: msgs::ChannelUpdate
+ /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
+ /// [`APIMisuseError`]: APIError::APIMisuseError
+ pub fn update_channel_config(
+ &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
+ ) -> Result<(), APIError> {
+ if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA {
+ return Err(APIError::APIMisuseError {
+ err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
+ });
}
- // While all existing nodes handle unsorted addresses just fine, the spec requires that
- // addresses be sorted for future compatibility.
- addresses.sort_by_key(|addr| addr.get_id());
-
- let announcement = msgs::UnsignedNodeAnnouncement {
- features: NodeFeatures::known(),
- timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
- node_id: self.get_our_node_id(),
- rgb, alias, addresses,
- excess_address_data: Vec::new(),
- excess_data: Vec::new(),
- };
- let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
- let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_network_key);
-
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
-
- let mut announced_chans = false;
- for (_, chan) in channel_state.by_id.iter() {
- if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
- msg,
- update_msg: match self.get_channel_update_for_broadcast(chan) {
- Ok(msg) => msg,
- Err(_) => continue,
- },
- });
- announced_chans = true;
- } else {
- // If the channel is not public or has not yet reached funding_locked, check the
- // next channel. If we don't yet have any public channels, we'll skip the broadcast
- // below as peers may not accept it without channels on chain first.
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(
+ &self.total_consistency_lock, &self.persistence_notifier,
+ );
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ for channel_id in channel_ids {
+ let channel_counterparty_node_id = channel_state.by_id.get(channel_id)
+ .ok_or(APIError::ChannelUnavailable {
+ err: format!("Channel with ID {} was not found", log_bytes!(*channel_id)),
+ })?
+ .get_counterparty_node_id();
+ if channel_counterparty_node_id != *counterparty_node_id {
+ return Err(APIError::APIMisuseError {
+ err: "counterparty node id mismatch".to_owned(),
+ });
+ }
+ }
+ for channel_id in channel_ids {
+ let channel = channel_state.by_id.get_mut(channel_id).unwrap();
+ if !channel.update_config(config) {
+ continue;
+ }
+ if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.get_counterparty_node_id(),
+ msg,
+ });
+ }
}
}
-
- if announced_chans {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
- msg: msgs::NodeAnnouncement {
- signature: node_announce_sig,
- contents: announcement
- },
- });
- }
+ Ok(())
}
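+ // Usage sketch (the starting `ChannelConfig` is hypothetical here; in practice it
+ // should mirror the channel's current config so only the intended fields change,
+ // and `cltv_expiry_delta` must stay at or above `MIN_CLTV_EXPIRY_DELTA`):
+ //
+ //     let mut config = current_config; // hypothetical: the channel's existing ChannelConfig
+ //     config.forwarding_fee_base_msat = 2_000;
+ //     channel_manager.update_channel_config(&counterparty_node_id, &[channel_id], &config)?;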
/// Processes HTLCs which are pending waiting on random forward delay.
let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
let mut handle_errors = Vec::new();
{
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
+ let mut forward_htlcs = HashMap::new();
+ mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
- for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
+ for (short_chan_id, mut pending_forwards) in forward_htlcs {
if short_chan_id != 0 {
- let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
- Some(chan_id) => chan_id.clone(),
- None => {
+ macro_rules! forwarding_channel_not_found {
+ () => {
for forward_info in pending_forwards.drain(..) {
match forward_info {
HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
prev_funding_outpoint } => {
+ macro_rules! failure_handler {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
+ log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ phantom_shared_secret: $phantom_ss,
+ });
+
+ let reason = if $next_hop_unknown {
+ HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+ } else {
+ HTLCDestination::FailedPayment{ payment_hash }
+ };
+
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+ reason
+ ));
+ continue;
+ }
+ }
macro_rules! fail_forward {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
{
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- phantom_shared_secret: $phantom_ss,
- });
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }
- ));
- continue;
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
+ }
+ }
+ }
+ macro_rules! failed_payment {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+ {
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
}
}
}
if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
- let phantom_shared_secret = {
- let mut arr = [0; 32];
- arr.copy_from_slice(&SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap())[..]);
- arr
- };
- let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+ let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
+ let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
// `update_fail_malformed_htlc`, meaning here we encrypt the error as
// if it came from us (the second-to-last hop) but contains the sha256
// of the onion.
- fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+ failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
},
Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
- fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+ failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
},
};
match next_hop {
onion_utils::Hop::Receive(hop_data) => {
match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
- Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret))
+ Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
},
_ => panic!(),
}
}
}
+ }
+ }
+ let forward_chan_id = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
+ Some((_cp_id, chan_id)) => chan_id.clone(),
+ None => {
+ forwarding_channel_not_found!();
continue;
}
};
- if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
- let mut add_htlc_msgs = Vec::new();
- let mut fail_htlc_msgs = Vec::new();
- for forward_info in pending_forwards.drain(..) {
- match forward_info {
- HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
- routing: PendingHTLCRouting::Forward {
- onion_packet, ..
- }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
- prev_funding_outpoint } => {
- log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- // Phantom payments are only PendingHTLCRouting::Receive.
- phantom_shared_secret: None,
- });
- match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
- Err(e) => {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
- } else {
- panic!("Stated return value requirements in send_htlc() were not met");
- }
- let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code, data }
- ));
- continue;
- },
- Ok(update_add) => {
- match update_add {
- Some(msg) => { add_htlc_msgs.push(msg); },
- None => {
- // Nothing to do here...we're waiting on a remote
- // revoke_and_ack before we can add anymore HTLCs. The Channel
- // will automatically handle building the update_add_htlc and
- // commitment_signed messages when we can.
- // TODO: Do some kind of timer to set the channel as !is_live()
- // as we don't really want others relying on us relaying through
- // this channel currently :/.
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ match channel_state.by_id.entry(forward_chan_id) {
+ hash_map::Entry::Vacant(_) => {
+ forwarding_channel_not_found!();
+ continue;
+ },
+ hash_map::Entry::Occupied(mut chan) => {
+ let mut add_htlc_msgs = Vec::new();
+ let mut fail_htlc_msgs = Vec::new();
+ for forward_info in pending_forwards.drain(..) {
+ match forward_info {
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
+ routing: PendingHTLCRouting::Forward {
+ onion_packet, ..
+ }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
+ prev_funding_outpoint } => {
+ log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ // Phantom payments are only PendingHTLCRouting::Receive.
+ phantom_shared_secret: None,
+ });
+ match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
+ Err(e) => {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
+ }
+ let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::Reason { failure_code, data },
+ HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+ ));
+ continue;
+ },
+ Ok(update_add) => {
+ match update_add {
+ Some(msg) => { add_htlc_msgs.push(msg); },
+ None => {
+ // Nothing to do here...we're waiting on a remote
+ // revoke_and_ack before we can add any more HTLCs. The Channel
+ // will automatically handle building the update_add_htlc and
+ // commitment_signed messages when we can.
+ // TODO: Do some kind of timer to set the channel as !is_live()
+ // as we don't really want others relying on us relaying through
+ // this channel currently :/.
+ }
}
}
}
- }
- },
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
- },
- HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
- log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
- Err(e) => {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+ },
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
+ Err(e) => {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK; the channel is on the chain
+ // and sending the HTLC-Timeout is their problem.
+ continue;
+ },
+ Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+ Ok(None) => {
+ // Nothing to do here...we're waiting on a remote
+ // revoke_and_ack before we can update the commitment
+ // transaction. The Channel will automatically handle
+ // building the update_fail_htlc and commitment_signed
+ // messages when we can.
+ // We don't need any kind of timer here as they should fail
+ // the channel onto the chain if they can't get our
+ // update_fail_htlc in time, it's not our problem.
}
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
- },
- Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
- Ok(None) => {
- // Nothing to do here...we're waiting on a remote
- // revoke_and_ack before we can update the commitment
- // transaction. The Channel will automatically handle
- // building the update_fail_htlc and commitment_signed
- // messages when we can.
- // We don't need any kind of timer here as they should fail
- // the channel onto the chain if they can't get our
- // update_fail_htlc in time, it's not our problem.
}
- }
- },
+ },
+ }
}
- }
- if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
- let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
- Ok(res) => res,
- Err(e) => {
- // We surely failed send_commitment due to bad keys, in that case
- // close channel and then send error message to peer.
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- let err: Result<(), _> = match e {
- ChannelError::Ignore(_) | ChannelError::Warn(_) => {
- panic!("Stated return value requirements in send_commitment() were not met");
- }
- ChannelError::Close(msg) => {
- log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
- let mut channel = remove_channel!(self, channel_state, chan);
- // ChannelClosed event is generated by handle_error for us.
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
- },
- ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
- };
- handle_errors.push((counterparty_node_id, err));
- continue;
+ if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
+ let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
+ Ok(res) => res,
+ Err(e) => {
+ // We surely failed send_commitment due to bad keys; in that case,
+ // close the channel and then send an error message to the peer.
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let err: Result<(), _> = match e {
+ ChannelError::Ignore(_) | ChannelError::Warn(_) => {
+ panic!("Stated return value requirements in send_commitment() were not met");
+ }
+ ChannelError::Close(msg) => {
+ log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
+ let mut channel = remove_channel!(self, chan);
+ // ChannelClosed event is generated by handle_error for us.
+ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
+ },
+ };
+ handle_errors.push((counterparty_node_id, err));
+ continue;
+ }
+ };
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+ continue;
+ }
}
- };
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
- continue;
+ log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
+ add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: add_htlc_msgs,
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: fail_htlc_msgs,
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed: commitment_msg,
+ },
+ });
}
- log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
- add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: add_htlc_msgs,
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: fail_htlc_msgs,
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed: commitment_msg,
- },
- });
}
- } else {
- unreachable!();
}
} else {
for forward_info in pending_forwards.drain(..) {
HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
prev_funding_outpoint } => {
- let (cltv_expiry, onion_payload, phantom_shared_secret) = match routing {
- PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } =>
- (incoming_cltv_expiry, OnionPayload::Invoice(payment_data), phantom_shared_secret),
+ let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
+ PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
+ let _legacy_hop_data = Some(payment_data.clone());
+ (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
+ },
PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
- (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None),
+ (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None, None),
_ => {
panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
}
},
value: amt_to_forward,
timer_ticks: 0,
+ total_msat: if let Some(data) = &payment_data { data.total_msat } else { amt_to_forward },
cltv_expiry,
onion_payload,
};
macro_rules! fail_htlc {
- ($htlc: expr) => {
+ ($htlc: expr, $payment_hash: expr) => {
let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(
&byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
phantom_shared_secret,
}), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash: $payment_hash },
));
}
}
macro_rules! check_total_value {
- ($payment_data_total_msat: expr, $payment_secret: expr, $payment_preimage: expr) => {{
+ ($payment_data: expr, $payment_preimage: expr) => {{
let mut payment_received_generated = false;
- let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
- .or_insert(Vec::new());
+ let purpose = || {
+ events::PaymentPurpose::InvoicePayment {
+ payment_preimage: $payment_preimage,
+ payment_secret: $payment_data.payment_secret,
+ }
+ };
+ let mut claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let (_, htlcs) = claimable_htlcs.entry(payment_hash)
+ .or_insert_with(|| (purpose(), Vec::new()));
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
continue
}
}
for htlc in htlcs.iter() {
total_value += htlc.value;
match &htlc.onion_payload {
- OnionPayload::Invoice(htlc_payment_data) => {
- if htlc_payment_data.total_msat != $payment_data_total_msat {
+ OnionPayload::Invoice { .. } => {
+ if htlc.total_msat != $payment_data.total_msat {
log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
- log_bytes!(payment_hash.0), $payment_data_total_msat, htlc_payment_data.total_msat);
+ log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
total_value = msgs::MAX_VALUE_MSAT;
}
if total_value >= msgs::MAX_VALUE_MSAT { break; }
_ => unreachable!(),
}
}
- if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data_total_msat {
+ if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
- log_bytes!(payment_hash.0), total_value, $payment_data_total_msat);
- fail_htlc!(claimable_htlc);
- } else if total_value == $payment_data_total_msat {
+ log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
+ fail_htlc!(claimable_htlc, payment_hash);
+ } else if total_value == $payment_data.total_msat {
htlcs.push(claimable_htlc);
new_events.push(events::Event::PaymentReceived {
payment_hash,
- purpose: events::PaymentPurpose::InvoicePayment {
- payment_preimage: $payment_preimage,
- payment_secret: $payment_secret,
- },
- amt: total_value,
+ purpose: purpose(),
+ amount_msat: total_value,
});
payment_received_generated = true;
} else {
match payment_secrets.entry(payment_hash) {
hash_map::Entry::Vacant(_) => {
match claimable_htlc.onion_payload {
- OnionPayload::Invoice(ref payment_data) => {
- let payment_preimage = match inbound_payment::verify(payment_hash, payment_data.clone(), self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
+ OnionPayload::Invoice { .. } => {
+ let payment_data = payment_data.unwrap();
+ let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
Ok(payment_preimage) => payment_preimage,
Err(()) => {
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
continue
}
};
- let payment_data_total_msat = payment_data.total_msat;
- let payment_secret = payment_data.payment_secret.clone();
- check_total_value!(payment_data_total_msat, payment_secret, payment_preimage);
+ check_total_value!(payment_data, payment_preimage);
},
OnionPayload::Spontaneous(preimage) => {
- match channel_state.claimable_htlcs.entry(payment_hash) {
+ match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
- e.insert(vec![claimable_htlc]);
+ let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
+ e.insert((purpose.clone(), vec![claimable_htlc]));
new_events.push(events::Event::PaymentReceived {
payment_hash,
- amt: amt_to_forward,
- purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
+ amount_msat: amt_to_forward,
+ purpose,
});
},
hash_map::Entry::Occupied(_) => {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
}
}
}
}
},
hash_map::Entry::Occupied(inbound_payment) => {
- let payment_data =
- if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
- data.clone()
- } else {
- log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
- continue
- };
+ if payment_data.is_none() {
+ log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc, payment_hash);
+ continue
+ };
+ let payment_data = payment_data.unwrap();
if inbound_payment.get().payment_secret != payment_data.payment_secret {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
} else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
} else {
- let payment_received_generated = check_total_value!(payment_data.total_msat, payment_data.payment_secret, inbound_payment.get().payment_preimage);
+ let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
if payment_received_generated {
inbound_payment.remove_entry();
}
}
}
- for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
+ for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
+ self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination);
}
self.forward_htlcs(&mut phantom_receives);
self.process_background_events();
}
- fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
+ fn update_channel_fee(&self, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as KeysInterface>::Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
Ok(res) => Ok(res),
Err(e) => {
- let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ let (drop, res) = convert_chan_err!(self, e, chan, chan_id);
if drop { retain_channel = false; }
Err(res)
}
};
let ret_err = match res {
Ok(Some((update_fee, commitment_signed, monitor_update))) => {
- if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
- let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
- if drop { retain_channel = false; }
- res
- } else {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: Some(update_fee),
- commitment_signed,
- },
- });
- Ok(())
+ match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: Some(update_fee),
+ commitment_signed,
+ },
+ });
+ Ok(())
+ },
+ e => {
+ let (res, drop) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
+ if drop { retain_channel = false; }
+ res
+ }
}
},
Ok(None) => Ok(()),
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = NotifyOption::SkipPersist;
- let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let mut handle_errors = Vec::new();
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
- let short_to_id = &mut channel_state.short_to_id;
channel_state.by_id.retain(|chan_id, chan| {
- let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
if err.is_err() {
handle_errors.push(err);
});
}
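+
+ /// Removes fulfilled outbound payments once no HTLCs remain pending and no queued events
+ /// still reference them for a few timer ticks, preserving the `send_payment` idempotency
+ /// guarantee described in the comment below.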
+ fn remove_stale_resolved_payments(&self) {
+ // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
+ // from the map. However, if we did that immediately when the last payment HTLC is claimed,
+ // this could race the user making a duplicate send_payment call and our idempotency
+ // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
+ // removal. This should be more than sufficient to ensure the idempotency of any
+ // `send_payment` calls that were made at the same time the `PaymentSent` event was being
+ // processed.
+ let mut pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+ let pending_events = self.pending_events.lock().unwrap();
+ pending_outbound_payments.retain(|payment_id, payment| {
+ if let PendingOutboundPayment::Fulfilled { session_privs, timer_ticks_without_htlcs, .. } = payment {
+ let mut no_remaining_entries = session_privs.is_empty();
+ if no_remaining_entries {
+ for ev in pending_events.iter() {
+ match ev {
+ events::Event::PaymentSent { payment_id: Some(ev_payment_id), .. } |
+ events::Event::PaymentPathSuccessful { payment_id: ev_payment_id, .. } |
+ events::Event::PaymentPathFailed { payment_id: Some(ev_payment_id), .. } => {
+ if payment_id == ev_payment_id {
+ no_remaining_entries = false;
+ break;
+ }
+ },
+ _ => {},
+ }
+ }
+ }
+ if no_remaining_entries {
+ *timer_ticks_without_htlcs += 1;
+ *timer_ticks_without_htlcs <= IDEMPOTENCY_TIMEOUT_TICKS
+ } else {
+ *timer_ticks_without_htlcs = 0;
+ true
+ }
+ } else { true }
+ });
+ }
+
/// Performs actions which should happen on startup and roughly once per minute thereafter.
///
/// This currently includes:
/// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
/// than a minute, informing the network that they should no longer attempt to route over
/// the channel.
+ /// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
+ /// with the current `ChannelConfig`.
///
/// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
/// estimate fetches.
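+ ///
+ /// A minimal sketch of driving this from a background task; the once-per-minute cadence
+ /// and the `channel_manager` handle are assumptions for illustration:
+ ///
+ /// ```ignore
+ /// std::thread::spawn(move || loop {
+ ///     std::thread::sleep(std::time::Duration::from_secs(60));
+ ///     // Broadcasts stale ChannelUpdates, expires previous ChannelConfigs, times out
+ ///     // pending MPP parts and prunes stale resolved payments, as described above.
+ ///     channel_manager.timer_tick_occurred();
+ /// });
+ /// ```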
let mut should_persist = NotifyOption::SkipPersist;
if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
- let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let mut handle_errors = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
- let short_to_id = &mut channel_state.short_to_id;
channel_state.by_id.retain(|chan_id, chan| {
let counterparty_node_id = chan.get_counterparty_node_id();
- let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
+ let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(pending_msg_events, chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
if err.is_err() {
handle_errors.push((err, counterparty_node_id));
if !retain_channel { return false; }
if let Err(e) = chan.timer_check_closing_negotiation_progress() {
- let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
+ let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
handle_errors.push((Err(err), chan.get_counterparty_node_id()));
if needs_close { return false; }
}
_ => {},
}
+ chan.maybe_expire_prev_config();
+
true
});
+ }
- channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
- if htlcs.is_empty() {
- // This should be unreachable
- debug_assert!(false);
+ self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ if htlcs.is_empty() {
+ // This should be unreachable
+ debug_assert!(false);
+ return false;
+ }
+ if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
+ // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
+ // In this case we're not going to handle any timeouts of the parts here.
+ if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+ return true;
+ } else if htlcs.into_iter().any(|htlc| {
+ htlc.timer_ticks += 1;
+ return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+ }) {
+ timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
return false;
}
- if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
- // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
- // In this case we're not going to handle any timeouts of the parts here.
- if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
- return true;
- } else if htlcs.into_iter().any(|htlc| {
- htlc.timer_ticks += 1;
- return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
- }) {
- timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
- return false;
- }
- }
- true
- });
- }
+ }
+ true
+ });
for htlc_source in timed_out_mpp_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+ let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
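+ // Failure code 23 is `mpp_timeout`, per BOLT 4.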
+ self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver);
}
for (err, counterparty_node_id) in handle_errors.drain(..) {
let _ = handle_error!(self, err, counterparty_node_id);
}
+
+ self.remove_stale_resolved_payments();
+
should_persist
});
}
/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
/// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
/// along the path (including in our own channel on which we received it).
- /// Returns false if no payment was found to fail backwards, true if the process of failing the
- /// HTLC backwards has been started.
- pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
+ ///
+ /// Note that in some cases around unclean shutdown, it is possible the payment may have
+ /// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
+ /// second copy of) the [`events::Event::PaymentReceived`] event. Alternatively, the payment
+ /// may have already been failed automatically by LDK if it was nearing its expiration time.
+ ///
+ /// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
+ /// [`ChannelManager::claim_funds`]), you should still monitor for
+ /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
+ /// startup during which time claims that were in-progress at shutdown may be replayed.
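+ ///
+ /// A minimal usage sketch; the `channel_manager` handle and the source of `payment_hash`
+ /// are assumptions for illustration:
+ ///
+ /// ```ignore
+ /// // We don't know the preimage for (or don't want to accept) this payment, so free the
+ /// // HTLCs back to their origin rather than letting them time out.
+ /// channel_manager.fail_htlc_backwards(&payment_hash);
+ /// ```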
+ pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
- if let Some(mut sources) = removed_source {
+ let removed_source = self.claimable_htlcs.lock().unwrap().remove(payment_hash);
+ if let Some((_, mut sources)) = removed_source {
for htlc in sources.drain(..) {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
self.best_block.read().unwrap().height()));
- self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
+ self.fail_htlc_backwards_internal(
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash: *payment_hash });
}
- true
- } else { false }
+ }
}
/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
///
/// This is for failures on the channel on which the HTLC was *received*, not failures
/// forwarding
- fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
+ fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> (u16, Vec<u8>) {
// We can't be sure what SCID was used when relaying inbound towards us, so we have to
// guess somewhat. If its a public channel, we figure best to just use the real SCID (as
// we're not leaking that we have a channel with the counterparty), otherwise we try to use
/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
/// that we want to return and a channel.
- fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
+ fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<<K::Target as KeysInterface>::Signer>) -> (u16, Vec<u8>) {
debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
- let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 4));
+ let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
if desired_err_code == 0x1000 | 20 {
- // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+ // No flags for `disabled_flags` are currently defined so they're always two zero bytes.
+ // See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
0u16.write(&mut enc).expect("Writes cannot fail");
}
- (upd.serialized_length() as u16).write(&mut enc).expect("Writes cannot fail");
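+ // As a sketch of the bytes written below (descriptive, not normative):
+ //   [2-byte zero flags, 0x1000|20 only] [u16: channel_update len + 2]
+ //   [u16: channel_update message type] [serialized channel_update]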
+ (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
+ msgs::ChannelUpdate::TYPE.write(&mut enc).expect("Writes cannot fail");
upd.write(&mut enc).expect("Writes cannot fail");
(desired_err_code, enc.0)
} else {
// Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
// failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
// be surfaced to the user.
- fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) {
+ fn fail_holding_cell_htlcs(
+ &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
+ counterparty_node_id: &PublicKey
+ ) {
for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
- match htlc_src {
- HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => {
- let (failure_code, onion_failure_data) =
- match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
- hash_map::Entry::Occupied(chan_entry) => {
- self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
- },
- hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
- };
- let channel_state = self.channel_state.lock().unwrap();
- self.fail_htlc_backwards_internal(channel_state,
- htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
- },
- HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
- let mut session_priv_bytes = [0; 32];
- session_priv_bytes.copy_from_slice(&session_priv[..]);
- let mut outbounds = self.pending_outbound_payments.lock().unwrap();
- if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
- if payment.get_mut().remove(&session_priv_bytes, Some(&path)) && !payment.get().is_fulfilled() {
- let retry = if let Some(payment_params_data) = payment_params {
- let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
- Some(RouteParameters {
- payment_params: payment_params_data,
- final_value_msat: path_last_hop.fee_msat,
- final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
- })
- } else { None };
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentPathFailed {
- payment_id: Some(payment_id),
- payment_hash,
- rejected_by_dest: false,
- network_update: None,
- all_paths_failed: payment.get().remaining_parts() == 0,
- path: path.clone(),
- short_channel_id: None,
- retry,
- #[cfg(test)]
- error_code: None,
- #[cfg(test)]
- error_data: None,
- });
- if payment.get().abandoned() && payment.get().remaining_parts() == 0 {
- pending_events.push(events::Event::PaymentFailed {
- payment_id,
- payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
- });
- payment.remove();
- }
- }
- } else {
- log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
- }
- },
- };
+ let (failure_code, onion_failure_data) =
+ match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
+ hash_map::Entry::Occupied(chan_entry) => {
+ self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+ },
+ hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
+ };
+
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
+ self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver);
}
}
/// Fails an HTLC backwards to the sender of it to us.
- /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
- /// There are several callsites that do stupid things like loop over a list of payment_hashes
- /// to fail and take the channel_state lock for each iteration (as we take ownership and may
- /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
- /// still-available channels.
- fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+ /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
+ fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
+ #[cfg(debug_assertions)]
+ {
+ // Ensure that the `channel_state` lock is not held when calling this function.
+ // This ensures that future code doesn't introduce a lock_order requirement for
+ // `forward_htlcs` to be locked after the `channel_state` lock, which calling this
+ // function with the `channel_state` locked would.
+ assert!(self.channel_state.try_lock().is_ok());
+ }
+
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
//identify whether we sent it or not based on the (I presume) very different runtime
//between the branches here. We should make this async and move it into the forward HTLCs
//timer handling.
log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
return;
}
- mem::drop(channel_state_lock);
- let retry = if let Some(payment_params_data) = payment_params {
+ let mut retry = if let Some(payment_params_data) = payment_params {
let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
Some(RouteParameters {
payment_params: payment_params_data.clone(),
let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
#[cfg(not(test))]
let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
- // TODO: If we decided to blame ourselves (or one of our channels) in
- // process_onion_failure we should close that channel as it implies our
- // next-hop is needlessly blaming us!
- events::Event::PaymentPathFailed {
- payment_id: Some(payment_id),
- payment_hash: payment_hash.clone(),
- rejected_by_dest: !payment_retryable,
- network_update,
- all_paths_failed,
- path: path.clone(),
- short_channel_id,
- retry,
-#[cfg(test)]
- error_code: onion_error_code,
-#[cfg(test)]
- error_data: onion_error_data
+
+ if self.payment_is_probe(payment_hash, &payment_id) {
+ if !payment_retryable {
+ events::Event::ProbeSuccessful {
+ payment_id,
+ payment_hash: payment_hash.clone(),
+ path: path.clone(),
+ }
+ } else {
+ events::Event::ProbeFailed {
+ payment_id,
+ payment_hash: payment_hash.clone(),
+ path: path.clone(),
+ short_channel_id,
+ }
+ }
+ } else {
+ // TODO: If we decided to blame ourselves (or one of our channels) in
+ // process_onion_failure we should close that channel as it implies our
+ // next-hop is needlessly blaming us!
+ if let Some(scid) = short_channel_id {
+ retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
+ }
+ events::Event::PaymentPathFailed {
+ payment_id: Some(payment_id),
+ payment_hash: payment_hash.clone(),
+ payment_failed_permanently: !payment_retryable,
+ network_update,
+ all_paths_failed,
+ path: path.clone(),
+ short_channel_id,
+ retry,
+ #[cfg(test)]
+ error_code: onion_error_code,
+ #[cfg(test)]
+ error_data: onion_error_data
+ }
}
},
&HTLCFailReason::Reason {
.. } => {
// we get a fail_malformed_htlc from the first hop
// TODO: We'd like to generate a NetworkUpdate for temporary
- // failures here, but that would be insufficient as get_route
+ // failures here, but that would be insufficient as find_route
// generally ignores its view of our own channels as we provide them via
// ChannelDetails.
// TODO: For non-temporary failures, we really should be closing the
// channel here as we apparently can't relay through them anyway.
- events::Event::PaymentPathFailed {
- payment_id: Some(payment_id),
- payment_hash: payment_hash.clone(),
- rejected_by_dest: path.len() == 1,
- network_update: None,
- all_paths_failed,
- path: path.clone(),
- short_channel_id: Some(path.first().unwrap().short_channel_id),
- retry,
+ let scid = path.first().unwrap().short_channel_id;
+ retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid));
+
+ if self.payment_is_probe(payment_hash, &payment_id) {
+ events::Event::ProbeFailed {
+ payment_id,
+ payment_hash: payment_hash.clone(),
+ path: path.clone(),
+ short_channel_id: Some(scid),
+ }
+ } else {
+ events::Event::PaymentPathFailed {
+ payment_id: Some(payment_id),
+ payment_hash: payment_hash.clone(),
+ payment_failed_permanently: false,
+ network_update: None,
+ all_paths_failed,
+ path: path.clone(),
+ short_channel_id: Some(scid),
+ retry,
#[cfg(test)]
- error_code: Some(*failure_code),
+ error_code: Some(*failure_code),
#[cfg(test)]
- error_data: Some(data.clone()),
+ error_data: Some(data.clone()),
+ }
}
}
};
pending_events.push(path_failure);
if let Some(ev) = full_failure_ev { pending_events.push(ev); }
},
- HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => {
+ HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
let err_packet = match onion_error {
HTLCFailReason::Reason { failure_code, data } => {
log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
};
let mut forward_event = None;
- if channel_state_lock.forward_htlcs.is_empty() {
+ let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
+ if forward_htlcs.is_empty() {
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
}
- match channel_state_lock.forward_htlcs.entry(short_channel_id) {
+ match forward_htlcs.entry(short_channel_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
},
entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
}
}
- mem::drop(channel_state_lock);
+ mem::drop(forward_htlcs);
+ let mut pending_events = self.pending_events.lock().unwrap();
if let Some(time) = forward_event {
- let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(events::Event::PendingHTLCsForwardable {
time_forwardable: time
});
}
+ pending_events.push(events::Event::HTLCHandlingFailed {
+ prev_channel_id: outpoint.to_channel_id(),
+ failed_next_destination: destination
+ });
},
}
}
/// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any
/// [`MessageSendEvent`]s needed to claim the payment.
///
+ /// Note that calling this method does *not* guarantee that the payment has been claimed. You
+ /// *must* wait for an [`Event::PaymentClaimed`] event which upon a successful claim will be
+ /// provided to your [`EventHandler`] when [`process_pending_events`] is next called.
+ ///
/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
/// event matches your expectation. If you fail to do so and call this method, you may provide
/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
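+ /// A minimal usage sketch; `channel_manager` and the preimage lookup are assumptions for
+ /// illustration:
+ ///
+ /// ```ignore
+ /// // After validating the amount in the PaymentReceived event, start the claim. Success
+ /// // is only confirmed once an Event::PaymentClaimed is surfaced to the event handler.
+ /// channel_manager.claim_funds(payment_preimage);
+ /// ```
+ ///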
- /// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now
- /// pending for processing via [`get_and_clear_pending_msg_events`].
- ///
/// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived
+ /// [`Event::PaymentClaimed`]: crate::util::events::Event::PaymentClaimed
+ /// [`process_pending_events`]: EventsProvider::process_pending_events
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
/// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
- pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
+ pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
- if let Some(mut sources) = removed_source {
+ let removed_source = self.claimable_htlcs.lock().unwrap().remove(&payment_hash);
+ if let Some((payment_purpose, mut sources)) = removed_source {
assert!(!sources.is_empty());
// If we are claiming an MPP payment, we have to take special care to ensure that each
// we got all the HTLCs and then a channel closed while we were waiting for the user to
// provide the preimage, so worrying too much about the optimal handling isn't worth
// it.
+ let mut claimable_amt_msat = 0;
+ let mut expected_amt_msat = None;
let mut valid_mpp = true;
+ let mut errs = Vec::new();
+ let mut claimed_any_htlcs = false;
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
for htlc in sources.iter() {
- if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
+ let chan_id = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
+ Some((_cp_id, chan_id)) => chan_id.clone(),
+ None => {
+ valid_mpp = false;
+ break;
+ }
+ };
+
+ if let None = channel_state.by_id.get(&chan_id) {
valid_mpp = false;
break;
}
- }
- let mut errs = Vec::new();
- let mut claimed_any_htlcs = false;
- for htlc in sources.drain(..) {
- if !valid_mpp {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
- self.best_block.read().unwrap().height()));
- self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
- HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
- } else {
- match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
+ if expected_amt_msat.is_some() && expected_amt_msat != Some(htlc.total_msat) {
+ log_error!(self.logger, "Somehow ended up with an MPP payment with different total amounts - this should not be reachable!");
+ debug_assert!(false);
+ valid_mpp = false;
+ break;
+ }
+ expected_amt_msat = Some(htlc.total_msat);
+ if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
+ // We don't currently support MPP for spontaneous payments, so just check
+ // that there's one payment here and move on.
+ if sources.len() != 1 {
+ log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
+ debug_assert!(false);
+ valid_mpp = false;
+ break;
+ }
+ }
+
+ claimable_amt_msat += htlc.value;
+ }
+ if sources.is_empty() || expected_amt_msat.is_none() {
+ log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
+ return;
+ }
+ if claimable_amt_msat != expected_amt_msat.unwrap() {
+ log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
+ expected_amt_msat.unwrap(), claimable_amt_msat);
+ return;
+ }
+ if valid_mpp {
+ for htlc in sources.drain(..) {
+ match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) {
ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
if let msgs::ErrorAction::IgnoreError = err.err.action {
// We got a temporary failure updating monitor, but will claim the
}
}
}
+ mem::drop(channel_state_lock);
+ if !valid_mpp {
+ for htlc in sources.drain(..) {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
+ self.best_block.read().unwrap().height()));
+ self.fail_htlc_backwards_internal(
+ HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash });
+ }
+ }
- // Now that we've done the entire above loop in one lock, we can handle any errors
- // which were generated.
- channel_state.take();
+ if claimed_any_htlcs {
+ self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
+ payment_hash,
+ purpose: payment_purpose,
+ amount_msat: claimable_amt_msat,
+ });
+ }
+ // Now we can handle any errors which were generated.
for (counterparty_node_id, err) in errs.drain(..) {
let res: Result<(), _> = Err(err);
let _ = handle_error!(self, res, counterparty_node_id);
}
-
- claimed_any_htlcs
- } else { false }
+ }
}
- fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
+ fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
let channel_state = &mut **channel_state_lock;
- let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
- Some(chan_id) => chan_id.clone(),
+ let chan_id = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
+ Some((_cp_id, chan_id)) => chan_id.clone(),
None => {
return ClaimFundsFromHop::PrevHopForceClosed
}
match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
Ok(msgs_monitor_option) => {
if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
- "Failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
- return ClaimFundsFromHop::MonitorUpdateFail(
- chan.get().get_counterparty_node_id(),
- handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
- Some(htlc_value_msat)
- );
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
+ "Failed to update channel monitor with preimage {:?}: {:?}",
+ payment_preimage, e);
+ return ClaimFundsFromHop::MonitorUpdateFail(
+ chan.get().get_counterparty_node_id(),
+ handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
+ Some(htlc_value_msat)
+ );
+ }
}
if let Some((msg, commitment_signed)) = msgs {
log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
}
},
Err((e, monitor_update)) => {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
- "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
- payment_preimage, e);
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
+ "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+ payment_preimage, e);
+ },
}
let counterparty_node_id = chan.get().get_counterparty_node_id();
- let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
+ let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
if drop {
chan.remove_entry();
}
return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
},
}
- } else { unreachable!(); }
+ } else { return ClaimFundsFromHop::PrevHopForceClosed }
}
fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
}
);
}
- if payment.get().remaining_parts() == 0 {
- payment.remove();
- }
}
}
}
}
- fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
+ fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<<K::Target as KeysInterface>::Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
mem::drop(channel_state_lock);
}
);
}
-
- if payment.get().remaining_parts() == 0 {
- payment.remove();
- }
}
} else {
log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
// We update the ChannelMonitor on the backward link, after
// receiving an offchain preimage event from the forward link (the
// event being update_fulfill_htlc).
- if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+ let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update);
+ if update_res != ChannelMonitorUpdateStatus::Completed {
+ // TODO: This needs to be handled somehow - if we receive a monitor update
+ // with a preimage we *must* somehow manage to propagate it to the upstream
+ // channel, or we must have an ability to receive the same event and try
+ // again on restart.
log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
+ payment_preimage, update_res);
}
// Note that we do *not* set `claimed_htlc` to false here. In fact, this
// totally could be a duplicate claim, but we have no way of knowing
} else { None };
let mut pending_events = self.pending_events.lock().unwrap();
- let source_channel_id = Some(prev_outpoint.to_channel_id());
+ let prev_channel_id = Some(prev_outpoint.to_channel_id());
+ let next_channel_id = Some(next_channel_id);
pending_events.push(events::Event::PaymentForwarded {
- source_channel_id,
fee_earned_msat,
claim_from_onchain_tx: from_onchain,
+ prev_channel_id,
+ next_channel_id,
});
}
}
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let chan_restoration_res;
- let (mut pending_failures, finalized_claims) = {
+ let (mut pending_failures, finalized_claims, counterparty_node_id) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
return;
}
+ let counterparty_node_id = channel.get().get_counterparty_node_id();
let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
- let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() {
+ let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
// We only send a channel_update in the case where we are just now sending a
- // funding_locked and the channel is in a usable state. We may re-send a
+ // channel_ready and the channel is in a usable state. We may re-send a
// channel_update later through the announcement_signatures process for public
// channels, but there's no reason not to just inform our counterparty of our fees
// now.
})
} else { None }
} else { None };
- chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked, updates.announcement_sigs);
+ chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
- (updates.failed_htlcs, updates.finalized_claimed_htlcs)
+
+ (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
};
post_handle_chan_restoration!(self, chan_restoration_res);
self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
+ self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
}
}
- /// Called to accept a request to open a channel after [`Event::OpenChannelRequest`] has been
- /// triggered.
+ /// Accepts a request to open a channel after an [`Event::OpenChannelRequest`].
///
- /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted.
+ /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
+ /// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
+ /// the channel.
///
- /// For inbound channels, the `user_channel_id` parameter will be provided back in
+ /// The `user_channel_id` parameter will be provided back in
/// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
- /// with which `accept_inbound_channel` call.
+ /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
+ ///
+ /// Note that this method will return an error and reject the channel if it requires support
+ /// for zero confirmations. Instead, `accept_inbound_channel_from_trusted_peer_0conf` must be
+ /// used to accept such channels.
///
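+ /// A minimal usage sketch (illustrative, not compiled): driving this method from an
+ /// [`Event::OpenChannelRequest`] handler. The `channel_manager` binding and the
+ /// `user_channel_id` of 42 are assumptions made for the example only.
+ ///
+ /// ```ignore
+ /// match event {
+ ///     Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
+ ///         channel_manager.accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, 42)
+ ///             .expect("inbound channel should be awaiting acceptance");
+ ///     },
+ ///     _ => { /* handle other events */ },
+ /// }
+ /// ```
+ ///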
/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
- pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], user_channel_id: u64) -> Result<(), APIError> {
+ pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+ self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id)
+ }
+
+ /// Accepts a request to open a channel after an [`Event::OpenChannelRequest`], treating
+ /// it as confirmed immediately.
+ ///
+ /// The `user_channel_id` parameter will be provided back in
+ /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
+ /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
+ ///
+ /// Unlike [`ChannelManager::accept_inbound_channel`], this method accepts the incoming channel
+ /// and, if the counterparty agrees, enables forwarding of payments immediately.
+ ///
+ /// This fully trusts that the counterparty has honestly and correctly constructed the funding
+ /// transaction and blindly assumes that it will eventually confirm.
+ ///
+ /// If it does not confirm before we decide to close the channel, or if the funding transaction
+ /// does not pay the correct amount to the correct script, *you will lose funds*.
+ ///
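+ /// A minimal usage sketch (illustrative, not compiled); the `channel_manager` binding and
+ /// the `user_channel_id` of 42 are assumptions made for the example only:
+ ///
+ /// ```ignore
+ /// // Only for counterparties you fully trust, e.g. a node you operate yourself:
+ /// channel_manager.accept_inbound_channel_from_trusted_peer_0conf(
+ ///     &temporary_channel_id, &counterparty_node_id, 42)?;
+ /// ```
+ ///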
+ /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
+ /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
+ pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
+ self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id)
+ }
+
+ fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u64) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state_lock = self.channel_state.lock().unwrap();
if !channel.get().inbound_is_awaiting_accept() {
return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
}
+ if *counterparty_node_id != channel.get().get_counterparty_node_id() {
+ return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
+ }
+ if accept_0conf {
+ channel.get_mut().set_0conf();
+ } else if channel.get().get_channel_type().requires_zero_conf() {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.get().get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+ }
+ };
+ channel_state.pending_msg_events.push(send_msg_err_event);
+ let _ = remove_channel!(self, channel);
+ return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
+ }
+
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
node_id: channel.get().get_counterparty_node_id(),
msg: channel.get_mut().accept_inbound_channel(user_channel_id),
},
hash_map::Entry::Vacant(entry) => {
if !self.default_configuration.manually_accept_inbound_channels {
+ if channel.get_channel_type().requires_zero_conf() {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+ }
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
node_id: counterparty_node_id.clone(),
msg: channel.accept_inbound_channel(0),
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.peer_channel_config_limits, &their_features), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan);
(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(events::Event::FundingGenerationReady {
temporary_channel_id: msg.temporary_channel_id,
+ counterparty_node_id: *counterparty_node_id,
channel_value_satoshis: value,
output_script,
user_channel_id: user_id,
}
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
- let ((funding_msg, monitor), mut chan) = {
+ let ((funding_msg, monitor, mut channel_ready), mut chan) = {
let best_block = *self.best_block.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove())
+ (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), chan), chan.remove())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
}
};
// Because we have exclusive ownership of the channel here we can release the channel_state
// lock before watch_channel
- if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
- match e {
- ChannelMonitorUpdateErr::PermanentFailure => {
- // Note that we reply with the new channel_id in error messages if we gave up on the
- // channel, not the temporary_channel_id. This is compatible with ourselves, but the
- // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
- // any messages referencing a previously-closed channel anyway.
- // We do not do a force-close here as that would generate a monitor update for
- // a monitor that we didn't manage to store (and that we don't care about - we
- // don't respond with the funding_signed so the channel can never go on chain).
- let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
- assert!(failed_htlcs.is_empty());
- return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
- },
- ChannelMonitorUpdateErr::TemporaryFailure => {
- // There's no problem signing a counterparty's funding transaction if our monitor
- // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
- // accepted payment from yet. We do, however, need to wait to send our funding_locked
- // until we have persisted our monitor.
- chan.monitor_update_failed(false, false, Vec::new(), Vec::new(), Vec::new());
- },
- }
+ match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ ChannelMonitorUpdateStatus::PermanentFailure => {
+ // Note that we reply with the new channel_id in error messages if we gave up on the
+ // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+ // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+ // any messages referencing a previously-closed channel anyway.
+ // We do not propagate the monitor update to the user as it would be for a monitor
+ // that we didn't manage to store (and that we don't care about - we don't respond
+ // with the funding_signed so the channel can never go on chain).
+ let (_monitor_update, failed_htlcs) = chan.force_shutdown(false);
+ assert!(failed_htlcs.is_empty());
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
+ },
+ ChannelMonitorUpdateStatus::InProgress => {
+ // There's no problem signing a counterparty's funding transaction if our monitor
+ // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+ // accepted payment from yet. We do, however, need to wait to send our channel_ready
+ // until we have persisted our monitor.
+ chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new());
+ channel_ready = None; // Don't send the channel_ready now
+ },
}
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
},
hash_map::Entry::Vacant(e) => {
+ let mut id_to_peer = self.id_to_peer.lock().unwrap();
+ match id_to_peer.entry(chan.channel_id()) {
+ hash_map::Entry::Occupied(_) => {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(
+ "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
+ funding_msg.channel_id))
+ },
+ hash_map::Entry::Vacant(i_e) => {
+ i_e.insert(chan.get_counterparty_node_id());
+ }
+ }
channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
node_id: counterparty_node_id.clone(),
msg: funding_msg,
});
+ if let Some(msg) = channel_ready {
+ send_channel_ready!(self, channel_state.pending_msg_events, chan, msg);
+ }
e.insert(chan);
}
}
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (monitor, funding_tx) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
+ let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
Ok(update) => update,
- Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
+ Err(e) => try_chan_entry!(self, Err(e), chan),
};
- if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
- let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
- if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
- // We weren't able to watch the channel to begin with, so no updates should be made on
- // it. Previously, full_stack_target found an (unreachable) panic when the
- // monitor update contained within `shutdown_finish` was applied.
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
- shutdown_finish.0.take();
+ match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
+ }
}
- }
- return res
+ return res
+ },
+ }
+ if let Some(msg) = channel_ready {
+ send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
}
funding_tx
},
Ok(())
}
- fn internal_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
+ fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(msg.channel_id) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().funding_locked(&msg, self.get_our_node_id(),
- self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan);
+ let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
+ self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan);
if let Some(announcement_sigs) = announcement_sigs_opt {
log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
});
}
}
+
+ emit_channel_ready_event!(self, chan.get_mut());
+
Ok(())
},
hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
}
- let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry);
+ let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
dropped_htlcs = htlcs;
// Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update {
- if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
- let (result, is_permanent) =
- handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, channel_state, chan_entry);
- break result;
- }
+ let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
+ let (result, is_permanent) =
+ handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ if is_permanent {
+ remove_channel!(self, chan_entry);
+ break result;
}
}
}
};
for htlc_source in dropped_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
+ self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
let _ = handle_error!(self, result, *counterparty_node_id);
if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry);
+ let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
if let Some(msg) = closing_signed {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
node_id: counterparty_node_id.clone(),
// also implies there are no pending HTLCs left on the channel, so we can
// fully delete it from tracking (the channel monitor is still around to
// watch for old state broadcasts)!
- (tx, Some(remove_channel!(self, channel_state, chan_entry)))
+ (tx, Some(remove_channel!(self, chan_entry)))
} else { (tx, None) }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
//but we should prevent it anyway.
- let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+ let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(msg.channel_id) {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let create_pending_htlc_status = |chan: &Channel<Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ let create_pending_htlc_status = |chan: &Channel<<K::Target as KeysInterface>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just
// want to reject the new HTLC and fail it backwards instead of forwarding.
_ => pending_forward_info
}
};
- try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
+ try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
+ self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
Ok(())
}
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
}
if (msg.failure_code & 0x8000) == 0 {
let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
- try_chan_entry!(self, Err(chan_err), channel_state, chan);
+ try_chan_entry!(self, Err(chan_err), chan);
}
- try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), chan);
Ok(())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
let (revoke_and_ack, commitment_signed, monitor_update) =
match chan.get_mut().commitment_signed(&msg, &self.logger) {
- Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
+ Err((None, e)) => try_chan_entry!(self, Err(e), chan),
Err((Some(update), e)) => {
assert!(chan.get().is_awaiting_monitor_update());
let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
- try_chan_entry!(self, Err(e), channel_state, chan);
+ try_chan_entry!(self, Err(e), chan);
unreachable!();
},
Ok(res) => res
};
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
+ let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
+ if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+ return Err(e);
}
+
channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
node_id: counterparty_node_id.clone(),
msg: revoke_and_ack,
for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
let mut forward_event = None;
if !pending_forwards.is_empty() {
- let mut channel_state = self.channel_state.lock().unwrap();
- if channel_state.forward_htlcs.is_empty() {
+ let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
+ if forward_htlcs.is_empty() {
forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
}
for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
- match channel_state.forward_htlcs.entry(match forward_info.routing {
+ match forward_htlcs.entry(match forward_info.routing {
PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
PendingHTLCRouting::Receive { .. } => 0,
PendingHTLCRouting::ReceiveKeysend { .. } => 0,
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+ let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
let raa_updates = break_chan_entry!(self,
- chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
+ chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
- if was_frozen_for_monitor {
- assert!(raa_updates.commitment_update.is_none());
- assert!(raa_updates.accepted_htlcs.is_empty());
- assert!(raa_updates.failed_htlcs.is_empty());
- assert!(raa_updates.finalized_claimed_htlcs.is_empty());
- break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
- } else {
- if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
- RAACommitmentOrder::CommitmentFirst, false,
- raa_updates.commitment_update.is_some(),
- raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
- raa_updates.finalized_claimed_htlcs) {
- break Err(e);
- } else { unreachable!(); }
- }
+ let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
+ if was_paused_for_mon_update {
+ assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+ assert!(raa_updates.commitment_update.is_none());
+ assert!(raa_updates.accepted_htlcs.is_empty());
+ assert!(raa_updates.failed_htlcs.is_empty());
+ assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+ break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+ }
+ if update_res != ChannelMonitorUpdateStatus::Completed {
+ if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
+ RAACommitmentOrder::CommitmentFirst, false,
+ raa_updates.commitment_update.is_some(), false,
+ raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+ raa_updates.finalized_claimed_htlcs) {
+ break Err(e);
+ } else { unreachable!(); }
}
if let Some(updates) = raa_updates.commitment_update {
channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
raa_updates.finalized_claimed_htlcs,
chan.get().get_short_channel_id()
- .expect("RAA should only work on a short-id-available channel"),
+ .unwrap_or(chan.get().outbound_scid_alias()),
chan.get().get_funding_txo().unwrap()))
},
hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
+ self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
match res {
Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
short_channel_id, channel_outpoint)) =>
{
for failure in pending_failures.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
+ self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver);
}
self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
self.finalize_claims(finalized_claim_htlcs);
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan);
+ try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
- self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), channel_state, chan),
+ self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan),
// Note that announcement_signatures fails if the channel cannot be announced,
// so get_channel_update_for_broadcast will never fail by the time we get here.
update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
/// Returns DoPersist if anything changed, otherwise either SkipPersist or an Err.
fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
- Some(chan_id) => chan_id.clone(),
+ let chan_id = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
+ Some((_cp_id, chan_id)) => chan_id.clone(),
None => {
// It's not a local channel
return Ok(NotifyOption::SkipPersist)
}
};
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
match channel_state.by_id.entry(chan_id) {
hash_map::Entry::Occupied(mut chan) => {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersist);
} else {
- try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+ log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
+ try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
}
},
- hash_map::Entry::Vacant(_) => unreachable!()
+ hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
}
Ok(NotifyOption::DoPersist)
}
// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
- &*self.best_block.read().unwrap()), channel_state, chan);
+ &*self.best_block.read().unwrap()), chan);
let mut channel_update = None;
if let Some(msg) = responses.shutdown_msg {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
chan_restoration_res = handle_chan_restoration_locked!(
self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
- responses.mon_update, Vec::new(), None, responses.funding_locked, responses.announcement_sigs);
+ responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
}
};
post_handle_chan_restoration!(self, chan_restoration_res);
- self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+ self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
- if let Some(funding_locked_msg) = need_lnd_workaround {
- self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
+ if let Some(channel_ready_msg) = need_lnd_workaround {
+ self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
}
Ok(())
}
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for monitor_event in pending_monitor_events.drain(..) {
- match monitor_event {
- MonitorEvent::HTLCEvent(htlc_update) => {
- if let Some(preimage) = htlc_update.payment_preimage {
- log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
- } else {
- log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
- }
- },
- MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
- MonitorEvent::UpdateFailed(funding_outpoint) => {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
- let by_id = &mut channel_state.by_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
- let mut chan = remove_channel!(self, channel_state, chan_entry);
- failed_channels.push(chan.force_shutdown(false));
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
+ for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
+ for monitor_event in monitor_events.drain(..) {
+ match monitor_event {
+ MonitorEvent::HTLCEvent(htlc_update) => {
+ if let Some(preimage) = htlc_update.payment_preimage {
+ log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+ } else {
+ log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+ let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+ self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
+ }
+ },
+ MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
+ MonitorEvent::UpdateFailed(funding_outpoint) => {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_lock;
+ let by_id = &mut channel_state.by_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
+ let mut chan = remove_channel!(self, chan_entry);
+ failed_channels.push(chan.force_shutdown(false));
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+ ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+ } else {
+ ClosureReason::CommitmentTxConfirmed
+ };
+ self.issue_channel_close_events(&chan, reason);
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ },
});
}
- let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
- ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
- } else {
- ClosureReason::CommitmentTxConfirmed
- };
- self.issue_channel_close_events(&chan, reason);
- pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: chan.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
- },
- });
- }
- },
- MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
- self.channel_monitor_updated(&funding_txo, monitor_update_id);
- },
+ },
+ MonitorEvent::Completed { funding_txo, monitor_update_id } => {
+ self.channel_monitor_updated(&funding_txo, monitor_update_id);
+ },
+ }
}
}
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let by_id = &mut channel_state.by_id;
- let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
by_id.retain(|channel_id, chan| {
match chan.maybe_free_holding_cell_htlcs(&self.logger) {
Ok((commitment_opt, holding_cell_failed_htlcs)) => {
if !holding_cell_failed_htlcs.is_empty() {
- failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
+ failed_htlcs.push((
+ holding_cell_failed_htlcs,
+ *channel_id,
+ chan.get_counterparty_node_id()
+ ));
}
if let Some((commitment_update, monitor_update)) = commitment_opt {
- if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
- has_monitor_update = true;
- let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
- handle_errors.push((chan.get_counterparty_node_id(), res));
- if close_channel { return false; }
- } else {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get_counterparty_node_id(),
- updates: commitment_update,
- });
+ match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: commitment_update,
+ });
+ },
+ e => {
+ has_monitor_update = true;
+ let (res, close_channel) = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
+ handle_errors.push((chan.get_counterparty_node_id(), res));
+ if close_channel { return false; }
+ },
}
}
true
},
Err(e) => {
- let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
// ChannelClosed event is generated by handle_error for us
!close_channel
}
let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
- for (failures, channel_id) in failed_htlcs.drain(..) {
- self.fail_holding_cell_htlcs(failures, channel_id);
+ for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
+ self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
}
for (counterparty_node_id, err) in handle_errors.drain(..) {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let by_id = &mut channel_state.by_id;
- let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
by_id.retain(|channel_id, chan| {
log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
self.tx_broadcaster.broadcast_transaction(&tx);
- update_maps_on_chan_removal!(self, short_to_id, chan);
+ update_maps_on_chan_removal!(self, chan);
false
} else { true }
},
Err(e) => {
has_update = true;
- let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
!close_channel
}
///
/// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
pub fn get_phantom_scid(&self) -> u64 {
- let mut channel_state = self.channel_state.lock().unwrap();
- let best_block = self.best_block.read().unwrap();
+ let best_block_height = self.best_block.read().unwrap().height();
+ let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
- let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+ let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
// Ensure the generated scid doesn't conflict with a real channel.
- match channel_state.short_to_id.entry(scid_candidate) {
- hash_map::Entry::Occupied(_) => continue,
- hash_map::Entry::Vacant(_) => return scid_candidate
+ match short_to_chan_info.get(&scid_candidate) {
+ Some(_) => continue,
+ None => return scid_candidate
}
}
}
}
}
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, K, F, L>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
}
}
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<M, T, K, F, L>
where
- M::Target: chain::Watch<Signer>,
+ M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
///
/// An [`EventHandler`] may safely call back to the provider in order to handle an event.
/// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
- ///
- /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
- /// when processed, an [`EventHandler`] must be able to handle previously seen events when
- /// restarting from an old state.
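+ ///
+ /// A minimal sketch (illustrative, not compiled), assuming the blanket [`EventHandler`]
+ /// implementation for `Fn(&Event)` closures:
+ ///
+ /// ```ignore
+ /// channel_manager.process_pending_events(&|event: &Event| match event {
+ ///     Event::PaymentReceived { .. } => { /* claim or fail the payment */ },
+ ///     _ => { /* handle other events */ },
+ /// });
+ /// ```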
fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut result = NotifyOption::SkipPersist;
}
}
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Listen for ChannelManager<Signer, M, T, K, F, L>
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Listen for ChannelManager<M, T, K, F, L>
where
- M::Target: chain::Watch<Signer>,
+ M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
}
}
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<Signer, M, T, K, F, L>
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, K, F, L>
where
- M::Target: chain::Watch<Signer>,
+ M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
}
}
}
- max_time!(self.last_node_announcement_serial);
max_time!(self.highest_seen_timestamp);
let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
payment_secrets.retain(|_, inbound_payment| {
inbound_payment.expiry_time > header.time as u64
});
-
- let mut outbounds = self.pending_outbound_payments.lock().unwrap();
- let mut pending_events = self.pending_events.lock().unwrap();
- outbounds.retain(|payment_id, payment| {
- if payment.remaining_parts() != 0 { return true }
- if let PendingOutboundPayment::Retryable { starting_block_height, payment_hash, .. } = payment {
- if *starting_block_height + PAYMENT_EXPIRY_BLOCKS <= height {
- log_info!(self.logger, "Timing out payment with id {} and hash {}", log_bytes!(payment_id.0), log_bytes!(payment_hash.0));
- pending_events.push(events::Event::PaymentFailed {
- payment_id: *payment_id, payment_hash: *payment_hash,
- });
- false
- } else { true }
- } else { true }
- });
}
fn get_relevant_txids(&self) -> Vec<Txid> {
let channel_state = self.channel_state.lock().unwrap();
- let mut res = Vec::with_capacity(channel_state.short_to_id.len());
+ let mut res = Vec::with_capacity(channel_state.by_id.len());
for chan in channel_state.by_id.values() {
if let Some(funding_txo) = chan.get_funding_txo() {
res.push(funding_txo.txid);
}
}
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
where
- M::Target: chain::Watch<Signer>,
+ M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
/// the function.
- fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
+ fn do_chain_event<FN: Fn(&mut Channel<<K::Target as KeysInterface>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
(&self, height_opt: Option<u32>, f: FN) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
{
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
- let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, channel| {
let res = f(channel);
- if let Ok((funding_locked_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+ if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
failure_code, data,
- }));
+ }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
}
- if let Some(funding_locked) = funding_locked_opt {
- send_funding_locked!(short_to_id, pending_msg_events, channel, funding_locked);
+ if let Some(channel_ready) = channel_ready_opt {
+ send_channel_ready!(self, pending_msg_events, channel, channel_ready);
if channel.is_usable() {
- log_trace!(self.logger, "Sending funding_locked with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+ log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: channel.get_counterparty_node_id(),
});
}
} else {
- log_trace!(self.logger, "Sending funding_locked WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+ log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
}
}
+
+ emit_channel_ready_event!(self, channel);
+
if let Some(announcement_sigs) = announcement_sigs {
log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
}
}
}
+ if channel.is_our_channel_ready() {
+ if let Some(real_scid) = channel.get_short_channel_id() {
+ // If we sent a 0conf channel_ready, and now have an SCID, we add it
+ // to the short_to_chan_info map here. Note that we check whether we
+ // can relay using the real SCID at relay-time (i.e.
+ // enforce option_scid_alias then), and if the funding tx is ever
+ // un-confirmed we force-close the channel, ensuring short_to_chan_info
+ // is always consistent.
+ let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
+ let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
+ assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
+ "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
+ fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+ }
+ }
} else if let Err(reason) = res {
- update_maps_on_chan_removal!(self, short_to_id, channel);
+ update_maps_on_chan_removal!(self, channel);
// It looks like our counterparty went on-chain or funding transaction was
// reorged out of the main chain. Close the channel.
failed_channels.push(channel.force_shutdown(true));
}
true
});
+ }
- if let Some(height) = height_opt {
- channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
- htlcs.retain(|htlc| {
- // If height is approaching the number of blocks we think it takes us to get
- // our commitment transaction confirmed before the HTLC expires, plus the
- // number of blocks we generally consider it to take to do a commitment update,
- // just give up on it and fail the HTLC.
- if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
- timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
- failure_code: 0x4000 | 15,
- data: htlc_msat_height_data
- }));
- false
- } else { true }
- });
- !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ if let Some(height) = height_opt {
+ self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+ htlcs.retain(|htlc| {
+ // If height is approaching the number of blocks we think it takes us to get
+ // our commitment transaction confirmed before the HTLC expires, plus the
+ // number of blocks we generally consider it to take to do a commitment update,
+ // just give up on it and fail the HTLC.
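+ // For example (constant value assumed, not normative): with an HTLC_FAIL_BACK_BUFFER of
+ // 24 blocks, an HTLC with cltv_expiry = 700_024 is failed back once the best block
+ // reaches height 700_000, leaving time for the failure to be committed before the
+ // HTLC's on-chain expiry.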
+ if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
+ timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+ failure_code: 0x4000 | 15,
+ data: htlc_msat_height_data
+ }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+ false
+ } else { true }
});
- }
+ !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+ });
}
self.handle_init_event_channel_failures(failed_channels);
- for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+ for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
+ self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination);
}
}
self.persistence_notifier.wait()
}
+ /// Gets a [`Future`] that completes when a persistable update is available. Note that
+ /// callbacks registered on the [`Future`] MUST NOT call back into this [`ChannelManager`] and
+ /// should instead register actions to be taken later.
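+ ///
+ /// A minimal sketch (illustrative, not compiled), assuming the returned [`Future`] is
+ /// awaited on the user's async runtime and `persist_manager` is a user-provided
+ /// persistence routine:
+ ///
+ /// ```ignore
+ /// channel_manager.get_persistable_update_future().await;
+ /// persist_manager(&channel_manager);
+ /// ```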
+ pub fn get_persistable_update_future(&self) -> Future {
+ self.persistence_notifier.get_future()
+ }
+
#[cfg(any(test, feature = "_test_utils"))]
pub fn get_persistence_condvar_value(&self) -> bool {
- let mutcond = &self.persistence_notifier.persistence_lock;
- let &(ref mtx, _) = mutcond;
- let guard = mtx.lock().unwrap();
- *guard
+ self.persistence_notifier.notify_pending()
}
/// Gets the latest best block which was connected either via the [`chain::Listen`] or
}
}
-impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
- ChannelMessageHandler for ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
+impl<M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
+ ChannelMessageHandler for ChannelManager<M, T, K, F, L>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
}
- fn handle_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) {
+ fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
+ let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
- let short_to_id = &mut channel_state.short_to_id;
- if no_connection_possible {
- log_debug!(self.logger, "Failing all channels with {} due to no_connection_possible", log_pubkey!(counterparty_node_id));
- channel_state.by_id.retain(|_, chan| {
- if chan.get_counterparty_node_id() == *counterparty_node_id {
- update_maps_on_chan_removal!(self, short_to_id, chan);
- failed_channels.push(chan.force_shutdown(true));
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
+ log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
+ log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
+ channel_state.by_id.retain(|_, chan| {
+ if chan.get_counterparty_node_id() == *counterparty_node_id {
+ chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+ if chan.is_shutdown() {
+ update_maps_on_chan_removal!(self, chan);
self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
- false
+ return false;
} else {
- true
- }
- });
- } else {
- log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
- channel_state.by_id.retain(|_, chan| {
- if chan.get_counterparty_node_id() == *counterparty_node_id {
- chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- if chan.is_shutdown() {
- update_maps_on_chan_removal!(self, short_to_id, chan);
- self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
- return false;
- } else {
- no_channels_remain = false;
- }
+ no_channels_remain = false;
}
- true
- })
- }
+ }
+ true
+ });
pending_msg_events.retain(|msg| {
match msg {
&events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != counterparty_node_id,
+ &events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id,
+ &events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
- &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
&events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
}
}
- fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
+ fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> {
+ if !init_msg.features.supports_static_remote_key() {
+ log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id));
+ return Err(());
+ }
+
log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let channel_state = &mut *channel_state_lock;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, chan| {
- if chan.get_counterparty_node_id() == *counterparty_node_id {
+ let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
if !chan.have_received_message() {
// If we created this (outbound) channel while we were disconnected from the
// peer we probably failed to send the open_channel message, which is now
});
true
}
- } else { true }
+ } else { true };
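+ // For our channels with *other* peers, take this opportunity to send the newly
+ // connected peer our channel_announcement and channel_update, where available.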
+ if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
+ if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
+ if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
+ pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
+ node_id: *counterparty_node_id,
+ msg, update_msg,
+ });
+ }
+ }
+ }
+ retain
});
//TODO: Also re-broadcast announcement_signatures
+ Ok(())
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
for chan in self.list_channels() {
if chan.counterparty.node_id == *counterparty_node_id {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data));
+ let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
}
}
} else {
}
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data));
+ let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
}
}
-}
-
-/// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
-/// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`.
-struct PersistenceNotifier {
- /// Users won't access the persistence_lock directly, but rather wait on its bool using
- /// `wait_timeout` and `wait`.
- persistence_lock: (Mutex<bool>, Condvar),
-}
-impl PersistenceNotifier {
- fn new() -> Self {
- Self {
- persistence_lock: (Mutex::new(false), Condvar::new()),
- }
+ fn provided_node_features(&self) -> NodeFeatures {
+ provided_node_features()
}
- fn wait(&self) {
- loop {
- let &(ref mtx, ref cvar) = &self.persistence_lock;
- let mut guard = mtx.lock().unwrap();
- if *guard {
- *guard = false;
- return;
- }
- guard = cvar.wait(guard).unwrap();
- let result = *guard;
- if result {
- *guard = false;
- return
- }
- }
+ fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
+ provided_init_features()
}
+}
- #[cfg(any(test, feature = "std"))]
- fn wait_timeout(&self, max_wait: Duration) -> bool {
- let current_time = Instant::now();
- loop {
- let &(ref mtx, ref cvar) = &self.persistence_lock;
- let mut guard = mtx.lock().unwrap();
- if *guard {
- *guard = false;
- return true;
- }
- guard = cvar.wait_timeout(guard, max_wait).unwrap().0;
- // Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
- // desired wait time has actually passed, and if not then restart the loop with a reduced wait
- // time. Note that this logic can be highly simplified through the use of
- // `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to
- // 1.42.0.
- let elapsed = current_time.elapsed();
- let result = *guard;
- if result || elapsed >= max_wait {
- *guard = false;
- return result;
- }
- match max_wait.checked_sub(elapsed) {
- None => return result,
- Some(_) => continue
- }
- }
- }
+/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
+pub fn provided_node_features() -> NodeFeatures {
+ provided_init_features().to_context()
+}
- // Signal to the ChannelManager persister that there are updates necessitating persisting to disk.
- fn notify(&self) {
- let &(ref persist_mtx, ref cnd) = &self.persistence_lock;
- let mut persistence_lock = persist_mtx.lock().unwrap();
- *persistence_lock = true;
- mem::drop(persistence_lock);
- cnd.notify_all();
- }
+/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
+///
+/// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom
+/// invoice" or not. Thus, this method is gated to test builds rather than exposed publicly.
+#[cfg(any(feature = "_test_utils", test))]
+pub fn provided_invoice_features() -> InvoiceFeatures {
+ provided_init_features().to_context()
+}
+
+/// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
+pub fn provided_channel_features() -> ChannelFeatures {
+ provided_init_features().to_context()
+}
+
+/// Fetches the set of [`InitFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
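+///
+/// As an illustrative (non-normative) sketch, the result can be inspected or converted
+/// to other feature contexts:
+///
+/// ```ignore
+/// let features = provided_init_features();
+/// // `static_remote_key` is required; `peer_connected` disconnects peers lacking it.
+/// assert!(features.supports_static_remote_key());
+/// // Convert to the node-feature context, as `provided_node_features` does internally.
+/// let node_features: NodeFeatures = features.to_context();
+/// ```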
+pub fn provided_init_features() -> InitFeatures {
+ // Note that if new features are added here which other peers may (eventually) require, we
+ // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for
+ // ErroringMessageHandler.
+ let mut features = InitFeatures::empty();
+ features.set_data_loss_protect_optional();
+ features.set_upfront_shutdown_script_optional();
+ features.set_variable_length_onion_required();
+ features.set_static_remote_key_required();
+ features.set_payment_secret_required();
+ features.set_basic_mpp_optional();
+ features.set_wumbo_optional();
+ features.set_shutdown_any_segwit_optional();
+ features.set_channel_type_optional();
+ features.set_scid_privacy_optional();
+ features.set_zero_conf_optional();
+ features
}
const SERIALIZATION_VERSION: u8 = 1;
(2, channel_id, required),
(3, channel_type, option),
(4, counterparty, required),
+ (5, outbound_scid_alias, option),
(6, funding_txo, option),
+ (7, config, option),
(8, short_channel_id, option),
(10, channel_value_satoshis, required),
(12, unspendable_punishment_reserve, option),
(18, outbound_capacity_msat, required),
// Note that by the time we get past the required read above, outbound_capacity_msat will be
// filled in, so we can safely unwrap it here.
- (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap())),
+ (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
(20, inbound_capacity_msat, required),
(22, confirmations_required, option),
(24, force_close_spend_delay, option),
(26, is_outbound, required),
- (28, is_funding_locked, required),
+ (28, is_channel_ready, required),
(30, is_usable, required),
(32, is_public, required),
(33, inbound_htlc_minimum_msat, option),
impl Writeable for ClaimableHTLC {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- let payment_data = match &self.onion_payload {
- OnionPayload::Invoice(data) => Some(data.clone()),
- _ => None,
+ let (payment_data, keysend_preimage) = match &self.onion_payload {
+ OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
+ OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
};
- let keysend_preimage = match self.onion_payload {
- OnionPayload::Invoice(_) => None,
- OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
- };
- write_tlv_fields!
- (writer,
- {
- (0, self.prev_hop, required), (2, self.value, required),
- (4, payment_data, option), (6, self.cltv_expiry, required),
- (8, keysend_preimage, option),
- });
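+ // `total_msat` is written with the odd TLV type 1 so that serialized data remains
+ // readable by older versions, which simply skip unknown odd TLV records.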
+ write_tlv_fields!(writer, {
+ (0, self.prev_hop, required),
+ (1, self.total_msat, required),
+ (2, self.value, required),
+ (4, payment_data, option),
+ (6, self.cltv_expiry, required),
+ (8, keysend_preimage, option),
+ });
Ok(())
}
}
impl Readable for ClaimableHTLC {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- let mut prev_hop = ::util::ser::OptionDeserWrapper(None);
+ let mut prev_hop = crate::util::ser::OptionDeserWrapper(None);
let mut value = 0;
let mut payment_data: Option<msgs::FinalOnionHopData> = None;
let mut cltv_expiry = 0;
+ let mut total_msat = None;
let mut keysend_preimage: Option<PaymentPreimage> = None;
- read_tlv_fields!
- (reader,
- {
- (0, prev_hop, required), (2, value, required),
- (4, payment_data, option), (6, cltv_expiry, required),
- (8, keysend_preimage, option)
- });
+ read_tlv_fields!(reader, {
+ (0, prev_hop, required),
+ (1, total_msat, option),
+ (2, value, required),
+ (4, payment_data, option),
+ (6, cltv_expiry, required),
+ (8, keysend_preimage, option)
+ });
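+ // Data serialized before `total_msat` was added lacks TLV type 1; reconstruct it:
+ // a spontaneous (keysend) payment carries its full value in this single HTLC,
+ // while an invoice payment recovers it from the legacy `payment_data` below.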
let onion_payload = match keysend_preimage {
Some(p) => {
if payment_data.is_some() {
return Err(DecodeError::InvalidValue)
}
+ if total_msat.is_none() {
+ total_msat = Some(value);
+ }
OnionPayload::Spontaneous(p)
},
None => {
- if payment_data.is_none() {
- return Err(DecodeError::InvalidValue)
+ if total_msat.is_none() {
+ if payment_data.is_none() {
+ return Err(DecodeError::InvalidValue)
+ }
+ total_msat = Some(payment_data.as_ref().unwrap().total_msat);
}
- OnionPayload::Invoice(payment_data.unwrap())
+ OnionPayload::Invoice { _legacy_hop_data: payment_data }
},
};
Ok(Self {
prev_hop: prev_hop.0.unwrap(),
timer_ticks: 0,
value,
+ total_msat: total_msat.unwrap(),
onion_payload,
cltv_expiry,
})
let id: u8 = Readable::read(reader)?;
match id {
0 => {
- let mut session_priv: ::util::ser::OptionDeserWrapper<SecretKey> = ::util::ser::OptionDeserWrapper(None);
+ let mut session_priv: crate::util::ser::OptionDeserWrapper<SecretKey> = crate::util::ser::OptionDeserWrapper(None);
let mut first_hop_htlc_msat: u64 = 0;
let mut path = Some(Vec::new());
let mut payment_id = None;
}
Ok(HTLCSource::OutboundRoute {
session_priv: session_priv.0.unwrap(),
- first_hop_htlc_msat: first_hop_htlc_msat,
+ first_hop_htlc_msat,
path: path.unwrap(),
payment_id: payment_id.unwrap(),
payment_secret,
}
impl Writeable for HTLCSource {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
match self {
HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payment_params } => {
0u8.write(writer)?;
(1, Fulfilled) => {
(0, session_privs, required),
(1, payment_hash, option),
+ (3, timer_ticks_without_htlcs, (default_value, 0)),
},
(2, Retryable) => {
(0, session_privs, required),
},
);
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
+impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<M, T, K, F, L>
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
best_block.block_hash().write(writer)?;
}
- let channel_state = self.channel_state.lock().unwrap();
- let mut unfunded_channels = 0;
- for (_, channel) in channel_state.by_id.iter() {
- if !channel.is_funding_initiated() {
- unfunded_channels += 1;
+ {
+ // Take the `channel_state` lock only temporarily, to avoid creating a lock order that
+ // requires the `forward_htlcs` lock to be taken after `channel_state`
+ let channel_state = self.channel_state.lock().unwrap();
+ let mut unfunded_channels = 0;
+ for (_, channel) in channel_state.by_id.iter() {
+ if !channel.is_funding_initiated() {
+ unfunded_channels += 1;
+ }
}
- }
- ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
- for (_, channel) in channel_state.by_id.iter() {
- if channel.is_funding_initiated() {
- channel.write(writer)?;
+ ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
+ for (_, channel) in channel_state.by_id.iter() {
+ if channel.is_funding_initiated() {
+ channel.write(writer)?;
+ }
}
}
- (channel_state.forward_htlcs.len() as u64).write(writer)?;
- for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
- short_channel_id.write(writer)?;
- (pending_forwards.len() as u64).write(writer)?;
- for forward in pending_forwards {
- forward.write(writer)?;
+ {
+ let forward_htlcs = self.forward_htlcs.lock().unwrap();
+ (forward_htlcs.len() as u64).write(writer)?;
+ for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
+ short_channel_id.write(writer)?;
+ (pending_forwards.len() as u64).write(writer)?;
+ for forward in pending_forwards {
+ forward.write(writer)?;
+ }
}
}
- (channel_state.claimable_htlcs.len() as u64).write(writer)?;
- for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
+ let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+ let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+ let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+
+ let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
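+ // Purposes are collected in map-iteration order so that, on read, they can be
+ // zipped back up with the `claimable_htlcs` entries written below.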
+ (claimable_htlcs.len() as u64).write(writer)?;
+ for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
payment_hash.write(writer)?;
(previous_hops.len() as u64).write(writer)?;
for htlc in previous_hops.iter() {
htlc.write(writer)?;
}
+ htlc_purposes.push(purpose);
}
let per_peer_state = self.per_peer_state.write().unwrap();
peer_state.latest_features.write(writer)?;
}
- let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
- let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
let events = self.pending_events.lock().unwrap();
(events.len() as u64).write(writer)?;
for event in events.iter() {
}
}
- (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
+ // Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
+ // `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
+ // likely to be identical to the node_announcement serial it replaces.
+ (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
(pending_inbound_payments.len() as u64).write(writer)?;
(3, pending_outbound_payments, required),
(5, self.our_network_pubkey, required),
(7, self.fake_scid_rand_bytes, required),
+ (9, htlc_purposes, vec_type),
+ (11, self.probing_cookie_secret, required),
});
Ok(())
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
-impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<Signer, M, T, K, F, L>>)
- where M::Target: chain::Watch<Signer>,
+impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ ReadableArgs<ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, L>>)
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
- let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
+ fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, K, F, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
}
-impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<Signer, M, T, K, F, L>)
- where M::Target: chain::Watch<Signer>,
+impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ ReadableArgs<ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<M, T, K, F, L>)
+ where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
+ K::Target: KeysInterface,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, <K::Target as KeysInterface>::Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
- let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = Vec::new();
for _ in 0..channel_count {
- let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
+ let mut channel: Channel<<K::Target as KeysInterface>::Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
} else {
log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
if let Some(short_channel_id) = channel.get_short_channel_id() {
- short_to_id.insert(short_channel_id, channel.channel_id());
+ short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+ }
+ if channel.is_funding_initiated() {
+ id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
}
by_id.insert(channel.channel_id(), channel);
}
+ } else if channel.is_awaiting_initial_mon_persist() {
+ // If we were persisted and shut down while the initial ChannelMonitor persistence
+ // was in-progress, we never broadcasted the funding transaction and can still
+ // safely discard the channel.
+ let _ = channel.force_shutdown(false);
+ channel_closures.push(events::Event::ChannelClosed {
+ channel_id: channel.channel_id(),
+ user_channel_id: channel.get_user_id(),
+ reason: ClosureReason::DisconnectedPeer,
+ });
} else {
log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
}
let claimable_htlcs_count: u64 = Readable::read(reader)?;
- let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
+ let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
for _ in 0..claimable_htlcs_count {
let payment_hash = Readable::read(reader)?;
let previous_hops_len: u64 = Readable::read(reader)?;
let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
for _ in 0..previous_hops_len {
- previous_hops.push(Readable::read(reader)?);
+ previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
}
- claimable_htlcs.insert(payment_hash, previous_hops);
+ claimable_htlcs_list.push((payment_hash, previous_hops));
}
let peer_count: u64 = Readable::read(reader)?;
}
}
- let last_node_announcement_serial: u32 = Readable::read(reader)?;
+ let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
let highest_seen_timestamp: u32 = Readable::read(reader)?;
let pending_inbound_payment_count: u64 = Readable::read(reader)?;
let mut pending_outbound_payments = None;
let mut received_network_pubkey: Option<PublicKey> = None;
let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
+ let mut probing_cookie_secret: Option<[u8; 32]> = None;
+ let mut claimable_htlc_purposes = None;
read_tlv_fields!(reader, {
(1, pending_outbound_payments_no_retry, option),
(3, pending_outbound_payments, option),
(5, received_network_pubkey, option),
(7, fake_scid_rand_bytes, option),
+ (9, claimable_htlc_purposes, vec_type),
+ (11, probing_cookie_secret, option),
});
if fake_scid_rand_bytes.is_none() {
fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
}
+ if probing_cookie_secret.is_none() {
+ probing_cookie_secret = Some(args.keys_manager.get_secure_random_bytes());
+ }
+
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
pending_outbound_payments = Some(pending_outbound_payments_compat);
} else if pending_outbound_payments.is_none() {
// payments which are still in-flight via their on-chain state.
// We only rebuild the pending payments map if we were most recently serialized by
// 0.0.102+
- for (_, monitor) in args.channel_monitors {
+ for (_, monitor) in args.channel_monitors.iter() {
if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
}
}
+ let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
+ let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
+
+ let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
+ if let Some(mut purposes) = claimable_htlc_purposes {
+ if purposes.len() != claimable_htlcs_list.len() {
+ return Err(DecodeError::InvalidValue);
+ }
+ for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) {
+ claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+ }
+ } else {
+ // LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do
+ // include a `_legacy_hop_data` in the `OnionPayload`.
+ for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) {
+ if previous_hops.is_empty() {
+ return Err(DecodeError::InvalidValue);
+ }
+ let purpose = match &previous_hops[0].onion_payload {
+ OnionPayload::Invoice { _legacy_hop_data } => {
+ if let Some(hop_data) = _legacy_hop_data {
+ events::PaymentPurpose::InvoicePayment {
+ payment_preimage: match pending_inbound_payments.get(&payment_hash) {
+ Some(inbound_payment) => inbound_payment.payment_preimage,
+ None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
+ Ok(payment_preimage) => payment_preimage,
+ Err(()) => {
+ log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0));
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ },
+ payment_secret: hop_data.payment_secret,
+ }
+ } else { return Err(DecodeError::InvalidValue); }
+ },
+ OnionPayload::Spontaneous(payment_preimage) =>
+ events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
+ };
+ claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
+ }
+ }
+
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
return Err(DecodeError::InvalidValue);
}
if chan.is_usable() {
- if short_to_id.insert(chan.outbound_scid_alias(), *chan_id).is_some() {
+ if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
// Note that in rare cases it's possible to hit this while reading an older
// channel if we just happened to pick a colliding outbound alias above.
log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
}
}
- let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
- let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
+ let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
+
+ for (_, monitor) in args.channel_monitors.iter() {
+ for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
+ if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
+ log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
+ let mut claimable_amt_msat = 0;
+ for claimable_htlc in claimable_htlcs {
+ claimable_amt_msat += claimable_htlc.value;
+
+ // Add a holding-cell claim of the payment to the Channel, which should be
+ // applied ~immediately on peer reconnection. Because it won't generate a
+ // new commitment transaction we can just provide the payment preimage to
+ // the corresponding ChannelMonitor and nothing else.
+ //
+ // We do so directly instead of via the normal ChannelMonitor update
+ // procedure as the ChainMonitor hasn't yet been initialized, implying
+ // we're not allowed to call it directly yet. Further, we do the update
+ // without incrementing the ChannelMonitor update ID as there isn't any
+ // reason to.
+ // If we were to generate a new ChannelMonitor update ID here and then
+ // crash before the user finishes block connect we'd end up force-closing
+ // this channel as well. On the flip side, there's no harm in restarting
+ // without the new monitor persisted - we'll end up right back here on
+ // restart.
+ let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+ if let Some(channel) = by_id.get_mut(&previous_channel_id) {
+ channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
+ }
+ if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
+ previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
+ }
+ }
+ pending_events_read.push(events::Event::PaymentClaimed {
+ payment_hash,
+ purpose: payment_purpose,
+ amount_msat: claimable_amt_msat,
+ });
+ }
+ }
+ }
+
let channel_manager = ChannelManager {
genesis_hash,
- fee_estimator: args.fee_estimator,
+ fee_estimator: bounded_fee_estimator,
chain_monitor: args.chain_monitor,
tx_broadcaster: args.tx_broadcaster,
channel_state: Mutex::new(ChannelHolder {
by_id,
- short_to_id,
- forward_htlcs,
- claimable_htlcs,
pending_msg_events: Vec::new(),
}),
inbound_payment_key: expanded_inbound_key,
pending_inbound_payments: Mutex::new(pending_inbound_payments),
pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+ forward_htlcs: Mutex::new(forward_htlcs),
+ claimable_htlcs: Mutex::new(claimable_htlcs),
outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
+ id_to_peer: Mutex::new(id_to_peer),
+ short_to_chan_info: FairRwLock::new(short_to_chan_info),
fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
+ probing_cookie_secret: probing_cookie_secret.unwrap(),
+
our_network_key,
our_network_pubkey,
secp_ctx,
- last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
per_peer_state: RwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
pending_background_events: Mutex::new(pending_background_events_read),
total_consistency_lock: RwLock::new(()),
- persistence_notifier: PersistenceNotifier::new(),
+ persistence_notifier: Notifier::new(),
keys_manager: args.keys_manager,
logger: args.logger,
};
for htlc_source in failed_htlcs.drain(..) {
- channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+ channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
use bitcoin::hashes::sha256::Hash as Sha256;
use core::time::Duration;
use core::sync::atomic::Ordering;
- use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
- use ln::channelmanager::{PaymentId, PaymentSendFailure};
- use ln::channelmanager::inbound_payment;
- use ln::features::InitFeatures;
- use ln::functional_test_utils::*;
- use ln::msgs;
- use ln::msgs::ChannelMessageHandler;
- use routing::router::{PaymentParameters, RouteParameters, find_route};
- use util::errors::APIError;
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
- use util::test_utils;
- use chain::keysinterface::KeysInterface;
-
- #[cfg(feature = "std")]
- #[test]
- fn test_wait_timeout() {
- use ln::channelmanager::PersistenceNotifier;
- use sync::Arc;
- use core::sync::atomic::AtomicBool;
- use std::thread;
-
- let persistence_notifier = Arc::new(PersistenceNotifier::new());
- let thread_notifier = Arc::clone(&persistence_notifier);
-
- let exit_thread = Arc::new(AtomicBool::new(false));
- let exit_thread_clone = exit_thread.clone();
- thread::spawn(move || {
- loop {
- let &(ref persist_mtx, ref cnd) = &thread_notifier.persistence_lock;
- let mut persistence_lock = persist_mtx.lock().unwrap();
- *persistence_lock = true;
- cnd.notify_all();
-
- if exit_thread_clone.load(Ordering::SeqCst) {
- break
- }
- }
- });
-
- // Check that we can block indefinitely until updates are available.
- let _ = persistence_notifier.wait();
-
- // Check that the PersistenceNotifier will return after the given duration if updates are
- // available.
- loop {
- if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
- break
- }
- }
-
- exit_thread.store(true, Ordering::SeqCst);
-
- // Check that the PersistenceNotifier will return after the given duration even if no updates
- // are available.
- loop {
- if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
- break
- }
- }
- }
+ use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
+ use crate::ln::channelmanager::{self, inbound_payment, PaymentId, PaymentSendFailure};
+ use crate::ln::functional_test_utils::*;
+ use crate::ln::msgs;
+ use crate::ln::msgs::ChannelMessageHandler;
+ use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
+ use crate::util::errors::APIError;
+ use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+ use crate::util::test_utils;
+ use crate::chain::keysinterface::KeysInterface;
#[test]
fn test_notify_limits() {
assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
- let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
// We check that the channel info nodes have doesn't change too early, even though we try
// to connect messages with new values
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
// First, send a partial MPP payment.
let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
+ let mut mpp_route = route.clone();
+ mpp_route.paths.push(mpp_route.paths[0].clone());
+
let payment_id = PaymentId([42; 32]);
// Use the utility function send_payment_along_path to send the payment with MPP data which
// indicates there are more HTLCs coming.
let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
- nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+ let session_privs = nodes[0].node.add_new_pending_payment(our_payment_hash, Some(payment_secret), payment_id, &mpp_route).unwrap();
+ nodes[0].node.send_payment_along_path(&mpp_route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
// Next, send a keysend payment with the same payment_hash and make sure it fails.
- nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
expect_payment_failed!(nodes[0], our_payment_hash, true);
// Send the second half of the original MPP payment.
- nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
+ nodes[0].node.send_payment_along_path(&mpp_route.paths[1], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
// claim_funds_along_route because the ordering of the messages causes the second half of the
// payment to be put in the holding cell, which confuses the test utilities. So we exchange the
// lightning messages manually.
- assert!(nodes[1].node.claim_funds(payment_preimage));
+ nodes[1].node.claim_funds(payment_preimage);
+ expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
check_added_monitors!(nodes[1], 2);
+
let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
let scorer = test_utils::TestScorer::with_penalty(0);
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let route = find_route(
- &nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None,
- nodes[0].logger, &scorer, &random_seed_bytes
+ &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+ None, nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
- nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ // We have to forward pending HTLCs twice - the first pass tries to forward the payment
+ // (and fails), the second processes the resulting failure and fails the HTLC backwards
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// To start (2), send a keysend payment but don't claim it.
let payment_preimage = PaymentPreimage([42; 32]);
let route = find_route(
- &nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None,
- nodes[0].logger, &scorer, &random_seed_bytes
+ &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
+ None, nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
- let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+ let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage), PaymentId(payment_preimage.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
// Next, attempt a regular payment and make sure it fails.
let payment_secret = PaymentSecret([43; 32]);
- nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey),
- final_value_msat: 10000,
+ final_value_msat: 10_000,
final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph;
let scorer = test_utils::TestScorer::with_penalty(0);
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
- &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+ &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let mismatch_payment_hash = PaymentHash([43; 32]);
- let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
+ let session_privs = nodes[0].node.add_new_pending_payment(mismatch_payment_hash, None, PaymentId(mismatch_payment_hash.0), &route).unwrap();
+ nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
let route_params = RouteParameters {
payment_params: PaymentParameters::for_keysend(payee_pubkey),
- final_value_msat: 10000,
+ final_value_msat: 10_000,
final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph;
let scorer = test_utils::TestScorer::with_penalty(0);
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
- &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
+ &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
nodes[0].logger, &scorer, &random_seed_bytes
).unwrap();
let test_preimage = PaymentPreimage([42; 32]);
let test_secret = PaymentSecret([43; 32]);
let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
- let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
+ let session_privs = nodes[0].node.add_new_pending_payment(payment_hash, Some(test_secret), PaymentId(payment_hash.0), &route).unwrap();
+ nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), PaymentId(payment_hash.0), None, session_privs).unwrap();
check_added_monitors!(nodes[0], 1);
let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
- let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
- let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+ let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
// Marshall an MPP route.
let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
route.paths[1][0].short_channel_id = chan_2_id;
route.paths[1][1].short_channel_id = chan_4_id;
- match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() {
+ match nodes[0].node.send_payment(&route, payment_hash, &None, PaymentId(payment_hash.0)).unwrap_err() {
PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) },
_ => panic!("unexpected error")
// payment verification fails as expected.
let mut bad_payment_hash = payment_hash.clone();
bad_payment_hash.0[0] += 1;
- match inbound_payment::verify(bad_payment_hash, payment_data.clone(), nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
+ match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
Ok(_) => panic!("Unexpected ok"),
Err(()) => {
nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1);
}
// Check that using the original payment hash succeeds.
- assert!(inbound_payment::verify(payment_hash, payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
+ assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
+ }
+
+ #[test]
+ fn test_id_to_peer_coverage() {
+ // Test that the `ChannelManager::id_to_peer` contains channels which have been assigned
+ // a `channel_id` (i.e. have had the funding tx created), and that they are removed once
+ // the channel is successfully closed.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
+ let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &open_channel);
+ let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &accept_channel);
+
+ let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
+ let channel_id = &tx.txid().into_inner();
+ {
+ // Ensure that the `id_to_peer` map is empty until either party has received the
+ // funding transaction, and therefore has the real `channel_id`.
+ assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+ assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ }
+
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
+ {
+ // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon
+ // as it has the funding transaction.
+ let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_0_lock.len(), 1);
+ assert!(nodes_0_lock.contains_key(channel_id));
+
+ assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ }
+
+ let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
+ {
+ let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_0_lock.len(), 1);
+ assert!(nodes_0_lock.contains_key(channel_id));
+
+ // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon
+ // as it has the funding transaction.
+ let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_1_lock.len(), 1);
+ assert!(nodes_1_lock.contains_key(channel_id));
+ }
+ check_added_monitors!(nodes[1], 1);
+ let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
+ check_added_monitors!(nodes[0], 1);
+ let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+ let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
+ update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
+
+ nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+ let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &channelmanager::provided_init_features(), &nodes_1_shutdown);
+
+ let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
+ {
+ // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
+ // channel can be fully closed by both parties (i.e. no outstanding HTLCs exist, the
+ // fee for the closing transaction has been negotiated, and each party has the other
+ // party's signature for the fee-negotiated closing transaction).
+ let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_0_lock.len(), 1);
+ assert!(nodes_0_lock.contains_key(channel_id));
+
+ // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
+ // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
+ // from `nodes[0]` for the closing transaction with the proposed fee, the channel is
+ // kept in `nodes[1]`'s `id_to_peer` map.
+ let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_1_lock.len(), 1);
+ assert!(nodes_1_lock.contains_key(channel_id));
+ }
+
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
+ {
+ // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
+ // therefore has all it needs to fully close the channel (both signatures for the
+ // closing transaction).
+ // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
+ // fully closed by `nodes[0]`.
+ assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
+
+ // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]`
+ // doesn't have `nodes[0]`'s signature for the closing transaction yet.
+ let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
+ assert_eq!(nodes_1_lock.len(), 1);
+ assert!(nodes_1_lock.contains_key(channel_id));
+ }
+
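+ // `nodes[0]`'s final `closing_signed` carries the signature `nodes[1]` is still waiting on.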
+ let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
+ {
+ // Assert that the channel has now been removed from both parties' `id_to_peer` maps once
+ // they both have everything required to fully close the channel.
+ assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+ }
+ let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+
+ check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+ check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
}
}
#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
pub mod bench {
- use chain::Listen;
- use chain::chainmonitor::{ChainMonitor, Persist};
- use chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner};
- use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
- use ln::features::{InitFeatures, InvoiceFeatures};
- use ln::functional_test_utils::*;
- use ln::msgs::{ChannelMessageHandler, Init};
- use routing::network_graph::NetworkGraph;
- use routing::router::{PaymentParameters, get_route};
- use util::test_utils;
- use util::config::UserConfig;
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use crate::chain::Listen;
+ use crate::chain::chainmonitor::{ChainMonitor, Persist};
+ use crate::chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner};
+ use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
+ use crate::ln::functional_test_utils::*;
+ use crate::ln::msgs::{ChannelMessageHandler, Init};
+ use crate::routing::gossip::NetworkGraph;
+ use crate::routing::router::{PaymentParameters, get_route};
+ use crate::util::test_utils;
+ use crate::util::config::UserConfig;
+ use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
- use bitcoin::{Block, BlockHeader, Transaction, TxOut};
+ use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut};
- use sync::{Arc, Mutex};
+ use crate::sync::{Arc, Mutex};
use test::Bencher;
struct NodeHolder<'a, P: Persist<InMemorySigner>> {
- node: &'a ChannelManager<InMemorySigner,
+ node: &'a ChannelManager<
&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
&'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
&'a test_utils::TestLogger, &'a P>,
&'a test_utils::TestBroadcaster, &'a KeysManager,
- &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>
+ &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>,
}
#[cfg(test)]
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let mut config: UserConfig = Default::default();
- config.own_channel_config.minimum_depth = 1;
+ config.channel_handshake_config.minimum_depth = 1;
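+ // With a minimum depth of 1, the channel becomes usable after the single block connected below.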
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
});
let node_b_holder = NodeHolder { node: &node_b };
- node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
- node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
- node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
- node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
+ node_b.handle_open_channel(&node_a.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
+ node_a.handle_accept_channel(&node_b.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
let tx;
if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
- tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 8_000_000, script_pubkey: output_script,
}]};
- node_a.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+ node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
} else { panic!(); }
node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
let block = Block {
- header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
txdata: vec![tx],
};
Listen::block_connected(&node_a, &block, 1);
Listen::block_connected(&node_b, &block, 1);
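+ // One connected block confirms the funding transaction, satisfying the minimum depth of 1 configured above.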
- node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
+ node_a.handle_channel_ready(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
let msg_events = node_a.get_and_clear_pending_msg_events();
assert_eq!(msg_events.len(), 2);
match msg_events[0] {
- MessageSendEvent::SendFundingLocked { ref msg, .. } => {
- node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+ MessageSendEvent::SendChannelReady { ref msg, .. } => {
+ node_b.handle_channel_ready(&node_a.get_our_node_id(), msg);
get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
},
_ => panic!(),
}
match msg_events[1] {
MessageSendEvent::SendChannelUpdate { .. } => {},
_ => panic!(),
}
- let dummy_graph = NetworkGraph::new(genesis_hash);
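+ // Both nodes should now surface an `Event::ChannelReady` identifying the counterparty.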
+ let events_a = node_a.get_and_clear_pending_events();
+ assert_eq!(events_a.len(), 1);
+ match events_a[0] {
+ Event::ChannelReady{ ref counterparty_node_id, .. } => {
+ assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let events_b = node_b.get_and_clear_pending_events();
+ assert_eq!(events_b.len(), 1);
+ match events_b[0] {
+ Event::ChannelReady{ ref counterparty_node_id, .. } => {
+ assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let dummy_graph = NetworkGraph::new(genesis_hash, &logger_a);
let mut payment_count: u64 = 0;
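+ // Sends and claims one payment from $node_a to $node_b, driving the intermediate commitment updates.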
macro_rules! send_payment {
($node_a: expr, $node_b: expr) => {
let usable_channels = $node_a.list_usable_channels();
let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id())
- .with_features(InvoiceFeatures::known());
+ .with_features(channelmanager::provided_invoice_features());
let scorer = test_utils::TestScorer::with_penalty(0);
let seed = [3u8; 32];
let keys_manager = KeysManager::new(&seed, 42, 42);
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap();
- $node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ $node_a.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
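+ // The `PaymentId` (derived here from the payment hash) uniquely identifies this payment for retries and event correlation.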
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
- assert!($node_b.claim_funds(payment_preimage));
+ $node_b.claim_funds(payment_preimage);
+ expect_payment_claimed!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
MessageSendEvent::UpdateHTLCs { node_id, updates } => {