//!
use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::cmp::fixed_time_eq;
-use bitcoin::hash_types::BlockHash;
+use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::secp256k1::key::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1;
use chain;
+use chain::Confirm;
use chain::Watch;
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
+use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
use util::config::UserConfig;
-use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
use util::logger::Logger;
use util::errors::APIError;
-use std::{cmp, mem};
+use core::{cmp, mem};
+use std::cell::RefCell;
use std::collections::{HashMap, hash_map, HashSet};
use std::io::{Cursor, Read};
use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::Duration;
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::time::Duration;
#[cfg(any(test, feature = "allow_wallclock_use"))]
use std::time::Instant;
-use std::marker::{Sync, Send};
-use std::ops::Deref;
+use core::ops::Deref;
use bitcoin::hashes::hex::ToHex;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
},
Receive {
- payment_data: Option<msgs::FinalOnionHopData>,
+ payment_data: msgs::FinalOnionHopData,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
},
}
struct ClaimableHTLC {
prev_hop: HTLCPreviousHopData,
value: u64,
- /// Filled in when the HTLC was received with a payment_secret packet, which contains a
- /// total_msat (which may differ from value if this is a Multi-Path Payment) and a
+ /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
/// payment_secret which prevents path-probing attacks and can associate different HTLCs which
/// are part of the same payment.
- payment_data: Option<msgs::FinalOnionHopData>,
+ payment_data: msgs::FinalOnionHopData,
cltv_expiry: u32,
}
}
}
-/// payment_hash type, use to cross-lock hop
-/// (C-not exported) as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub struct PaymentHash(pub [u8;32]);
-/// payment_preimage type, use to route payment between hop
-/// (C-not exported) as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub struct PaymentPreimage(pub [u8;32]);
-/// payment_secret type, use to authenticate sender to the receiver and tie MPP HTLCs together
-/// (C-not exported) as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub struct PaymentSecret(pub [u8;32]);
-
type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// guarantees are made about the existence of a channel with the short id here, nor the short
/// ids in the PendingHTLCInfo!
pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
- /// (payment_hash, payment_secret) -> Vec<HTLCs> for tracking HTLCs that
- /// were to us and can be failed/claimed by the user
+ /// Map from payment hash to any HTLCs which are to us and can be failed/claimed by the user.
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the channels given here actually existing anymore by the time you
/// go to read them!
- claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
+ claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
}
/// Events which we process internally but cannot be processed immediately at the generation site
-/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+/// for some reason. They are handled in timer_tick_occurred, so may be processed with
/// quite some time lag.
enum BackgroundEvent {
/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
latest_features: InitFeatures,
}
-#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
-const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
+/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
+/// actually ours and not some duplicate HTLC sent to us by a node along the route.
+///
+/// For users who don't want to bother doing their own payment preimage storage, we also store that
+/// here.
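+///
+/// A sketch of the acceptance check this data backs (illustrative only; the real
+/// logic lives in `process_pending_htlc_forwards`):
+///
+/// ```ignore
+/// fn accepts(pending: &PendingInboundPayment, secret: PaymentSecret, total_msat: u64) -> bool {
+///     // The sender-provided payment_secret must match, and any enforced minimum must be met.
+///     pending.payment_secret == secret
+///         && pending.min_value_msat.map_or(true, |min| total_msat >= min)
+/// }
+/// ```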
+struct PendingInboundPayment {
+ /// The payment secret that the sender must use for us to accept this payment
+ payment_secret: PaymentSecret,
+ /// Time at which this HTLC expires - blocks with a header time above this value will result in
+ /// this payment being removed.
+ expiry_time: u64,
+ /// Arbitrary identifier the user specifies (or not)
+ user_payment_id: u64,
+ // Other required attributes of the payment, optionally enforced:
+ payment_preimage: Option<PaymentPreimage>,
+ min_value_msat: Option<u64>,
+}
/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
-/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
+/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
///
/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager or
/// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
tx_broadcaster: T,
#[cfg(test)]
- pub(super) latest_block_height: AtomicUsize,
+ pub(super) best_block: RwLock<BestBlock>,
#[cfg(not(test))]
- latest_block_height: AtomicUsize,
- last_block_hash: RwLock<BlockHash>,
+ best_block: RwLock<BestBlock>,
secp_ctx: Secp256k1<secp256k1::All>,
#[cfg(any(test, feature = "_test_utils"))]
pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
#[cfg(not(any(test, feature = "_test_utils")))]
channel_state: Mutex<ChannelHolder<Signer>>,
+
+ /// Storage for PaymentSecrets and any requirements on future inbound payments before we will
+ /// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
+ /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
+ /// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
+ /// Locked *after* channel_state.
+ pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
+
+ /// The session_priv bytes of outbound payments which are pending resolution.
+ /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
+ /// (if the channel has been force-closed), however we track them here to prevent duplicative
+ /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative
+ /// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
+ /// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
+ /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
+ /// after reloading from disk while replaying blocks against ChannelMonitors.
+ ///
+ /// Locked *after* channel_state.
+ pending_outbound_payments: Mutex<HashSet<[u8; 32]>>,
+
our_network_key: SecretKey,
+ our_network_pubkey: PublicKey,
/// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
/// value increases strictly since we don't assume access to a time source.
last_node_announcement_serial: AtomicUsize,
+ /// The highest block timestamp we've seen, which is usually a good guess at the current time.
+ /// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
+ /// very far in the past, and can only ever be up to two hours in the future.
+ highest_seen_timestamp: AtomicUsize,
+
/// The bulk of our storage will eventually be here (channels and message queues and the like).
/// If we are connected to a peer we always at least have an entry here, even if no channels
/// are currently open with that peer.
/// Essentially just when we're serializing ourselves out.
/// Taken first everywhere where we are making changes before any other locks.
/// When acquiring this lock in read mode, rather than acquiring it directly, call
- /// `PersistenceNotifierGuard::new(..)` and pass the lock to it, to ensure the PersistenceNotifier
- /// the lock contains sends out a notification when the lock is released.
+ /// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
+ /// PersistenceNotifier the lock contains sends out a notification when the lock is released.
total_consistency_lock: RwLock<()>,
persistence_notifier: PersistenceNotifier,
/// The network for determining the `chain_hash` in Lightning messages.
pub network: Network,
- /// The hash of the latest block successfully connected.
- pub latest_hash: BlockHash,
-
- /// The height of the latest block successfully connected.
+ /// The hash and height of the latest block successfully connected.
///
/// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
- pub latest_height: usize,
+ pub best_block: BestBlock,
+}
+
+/// The best known block as identified by its hash and height.
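+///
+/// For example (illustrative; `next_header` stands in for a newly-connected block's
+/// header):
+///
+/// ```ignore
+/// // Start at genesis, then track the tip as blocks connect.
+/// let mut best = BestBlock::from_genesis(Network::Bitcoin);
+/// assert_eq!(best.height(), 0);
+/// best = BestBlock::new(next_header.block_hash(), best.height() + 1);
+/// ```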
+#[derive(Clone, Copy)]
+pub struct BestBlock {
+ block_hash: BlockHash,
+ height: u32,
+}
+
+impl BestBlock {
+ /// Returns the best block from the genesis of the given network.
+ pub fn from_genesis(network: Network) -> Self {
+ BestBlock {
+ block_hash: genesis_block(network).header.block_hash(),
+ height: 0,
+ }
+ }
+
+ /// Returns the best block as identified by the given block hash and height.
+ pub fn new(block_hash: BlockHash, height: u32) -> Self {
+ BestBlock { block_hash, height }
+ }
+
+ /// Returns the best block hash.
+ pub fn block_hash(&self) -> BlockHash { self.block_hash }
+
+ /// Returns the best block height.
+ pub fn height(&self) -> u32 { self.height }
+}
+
+#[derive(Copy, Clone, PartialEq)]
+enum NotifyOption {
+ DoPersist,
+ SkipPersist,
}
/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
/// desirable to notify any listeners on `await_persistable_update_timeout`/
-/// `await_persistable_update` that new updates are available for persistence. Therefore, this
+/// `await_persistable_update` when new updates are available for persistence. Therefore, this
/// struct is responsible for locking the total consistency lock and, upon going out of scope,
/// sending the aforementioned notification (since the lock being released indicates that the
/// updates are ready for persistence).
-struct PersistenceNotifierGuard<'a> {
+///
+/// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
+/// notify or not based on whether relevant changes have been made, providing a closure to
+/// `optionally_notify` which returns a `NotifyOption`.
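+///
+/// For example (illustrative only):
+///
+/// ```ignore
+/// let _guard = PersistenceNotifierGuard::optionally_notify(
+///     &self.total_consistency_lock, &self.persistence_notifier,
+///     || NotifyOption::SkipPersist);
+/// // ... make changes which do not need to be persisted ...
+/// // On drop, no notification is sent because the closure returned SkipPersist.
+/// ```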
+struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
persistence_notifier: &'a PersistenceNotifier,
+ should_persist: F,
// We hold onto this result so the lock doesn't get released immediately.
_read_guard: RwLockReadGuard<'a, ()>,
}
-impl<'a> PersistenceNotifierGuard<'a> {
- fn new(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> Self {
+impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
+ fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+ PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
+ }
+
+ fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
let read_guard = lock.read().unwrap();
- Self {
+ PersistenceNotifierGuard {
persistence_notifier: notifier,
+ should_persist: persist_check,
_read_guard: read_guard,
}
}
}
-impl<'a> Drop for PersistenceNotifierGuard<'a> {
+impl<'a, F: Fn() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
fn drop(&mut self) {
- self.persistence_notifier.notify();
+ if (self.should_persist)() == NotifyOption::DoPersist {
+ self.persistence_notifier.notify();
+ }
}
}
pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. The current default represents roughly six hours of blocks at six blocks/hour.
+/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
///
/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
///
// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 6;
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 7;
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
+/// Minimum CLTV difference between the current block height and received inbound payments.
+/// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least
+/// this value.
+// Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
+// any payments to succeed. Further, we don't want payments to fail if a block was found while
+// a payment was being routed, so we add an extra block to be safe.
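+// (So: HTLC_FAIL_BACK_BUFFER + 1 fails, HTLC_FAIL_BACK_BUFFER + 2 is the minimum
+// that can succeed, and + 3 leaves one block of slack for a block found in flight.)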
+pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3;
+
// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
// ie that if the next-hop peer fails the HTLC within
// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
-// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
+// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
// ChannelMontior::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
/// Note that this means this value is *not* persistent - it can change once during the
/// lifetime of the channel.
pub channel_id: [u8; 32],
+ /// The Channel's funding transaction output, if we've negotiated the funding transaction with
+ /// our counterparty already.
+ ///
+ /// Note that, if this has been set, `channel_id` will be equivalent to
+ /// `funding_txo.unwrap().to_channel_id()`.
+ pub funding_txo: Option<OutPoint>,
/// The position of the funding transaction in the chain. None if the funding transaction has
/// not yet been confirmed and the channel fully opened.
pub short_channel_id: Option<u64>,
/// Note that there are some corner cases not fully handled here, so the actual available
/// inbound capacity may be slightly higher than this.
pub inbound_capacity_msat: u64,
+ /// True if the channel was initiated (and thus funded) by us.
+ pub is_outbound: bool,
+ /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
+ /// channel is not currently being shut down. `funding_locked` message exchange implies the
+ /// required confirmation count has been reached (and we were connected to the peer at some
+ /// point after the funding transaction received enough confirmations).
+ pub is_funding_locked: bool,
/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
- /// the peer is connected, and (c) no monitor update failure is pending resolution.
- pub is_live: bool,
-
+ /// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
+ /// channel is not currently negotiating a shutdown.
+ ///
+ /// This is a strict superset of `is_funding_locked`.
+ pub is_usable: bool,
+ /// True if this channel is (or will be) publicly-announced.
+ pub is_public: bool,
/// Information on the fees and requirements that the counterparty requires when forwarding
/// payments to us through this channel.
pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
}
}
+/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
+macro_rules! convert_chan_err {
+ ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+ match $err {
+ ChannelError::Ignore(msg) => {
+ (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+ },
+ ChannelError::Close(msg) => {
+ log_trace!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+ if let Some(short_id) = $channel.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
+ }
+ let shutdown_res = $channel.force_shutdown(true);
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+ },
+ ChannelError::CloseDelayBroadcast(msg) => {
+ log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
+ if let Some(short_id) = $channel.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
+ }
+ let shutdown_res = $channel.force_shutdown(false);
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+ }
+ }
+ }
+}
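+// Note that `convert_chan_err!` never removes the channel from `by_id` itself; when
+// the returned boolean is true, callers (such as `break_chan_entry!` below) are
+// responsible for calling `remove_entry()` on the corresponding map entry.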
+
macro_rules! break_chan_entry {
($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
match $res {
Ok(res) => res,
- Err(ChannelError::Ignore(msg)) => {
- break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
- },
- Err(ChannelError::Close(msg)) => {
- log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ Err(e) => {
+ let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+ if drop {
+ $entry.remove_entry();
}
- break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
- },
- Err(ChannelError::CloseDelayBroadcast(_)) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
+ break Err(res);
+ }
}
}
}
($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
match $res {
Ok(res) => res,
- Err(ChannelError::Ignore(msg)) => {
- return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
- },
- Err(ChannelError::Close(msg)) => {
- log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
- }
- return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
- },
- Err(ChannelError::CloseDelayBroadcast(msg)) => {
- log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ Err(e) => {
+ let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+ if drop {
+ $entry.remove_entry();
}
- let shutdown_res = chan.force_shutdown(false);
- return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+ return Err(res);
}
}
}
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => {
match $err {
ChannelMonitorUpdateErr::PermanentFailure => {
- log_error!($self.logger, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+ if let Some(short_id) = $chan.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
}
// TODO: $failed_fails is dropped here, which will cause other channels to hit the
// chain in a confused state! We need to move them into the ChannelMonitor which
// splitting hairs we'd prefer to claim payments that were to us, but we haven't
// given up the preimage yet, so might as well just wait until the payment is
// retried, avoiding the on-chain fees.
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
- res
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok()));
+ (res, true)
},
ChannelMonitorUpdateErr::TemporaryFailure => {
log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
- log_bytes!($entry.key()[..]),
+ log_bytes!($chan_id[..]),
if $resend_commitment && $resend_raa {
match $action_type {
RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
if !$resend_raa {
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
}
- $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
- Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
+ $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+ (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
},
}
- }
+ };
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { {
+ let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key());
+ if drop {
+ $entry.remove_entry();
+ }
+ res
+ } };
}
macro_rules! return_monitor_err {
}
}
+macro_rules! handle_chan_restoration_locked {
+ ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
+ $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+ $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr) => { {
+ let mut htlc_forwards = None;
+ let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
+
+ let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
+ let chanmon_update_is_none = chanmon_update.is_none();
+ let res = loop {
+ let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
+ if !forwards.is_empty() {
+ htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+ $channel_entry.get().get_funding_txo().unwrap(), forwards));
+ }
+
+ if chanmon_update.is_some() {
+ // On reconnect, we, by definition, only resend a funding_locked if there have been
+ // no commitment updates, so the only channel monitor update which could also be
+ // associated with a funding_locked would be the funding_created/funding_signed
+ // monitor update. That monitor update failing implies that we won't send
+ // funding_locked until it's been updated, so we can't have a funding_locked and a
+ // monitor update here (so we don't bother to handle it correctly below).
+ assert!($funding_locked.is_none());
+ // A channel monitor update makes no sense without either a funding_locked or a
+ // commitment update to process after it. Since we can't have a funding_locked, we
+ // only bother to handle the monitor-update + commitment_update case below.
+ assert!($commitment_update.is_some());
+ }
+
+ if let Some(msg) = $funding_locked {
+ // Similar to the above, this implies that we're letting the funding_locked fly
+ // before it should be allowed to.
+ assert!(chanmon_update.is_none());
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+ node_id: counterparty_node_id,
+ msg,
+ });
+ if let Some(announcement_sigs) = $self.get_announcement_sigs($channel_entry.get()) {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: counterparty_node_id,
+ msg: announcement_sigs,
+ });
+ }
+ $channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), $channel_entry.get().channel_id());
+ }
+
+ let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
+ if let Some(monitor_update) = chanmon_update {
+ // We only ever broadcast a funding transaction in response to a funding_signed
+ // message and the resulting monitor update. Thus, on channel_reestablish
+ // message handling we can't have a funding transaction to broadcast. When
+ // processing a monitor update finishing resulting in a funding broadcast, we
+ // cannot have a second monitor update, thus this case would indicate a bug.
+ assert!(funding_broadcastable.is_none());
+ // Given we were just reconnected or finished updating a channel monitor, the
+ // only case where we can get a new ChannelMonitorUpdate would be if we also
+ // have some commitment updates to send as well.
+ assert!($commitment_update.is_some());
+ if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+ // channel_reestablish doesn't guarantee that the order of the messages it
+ // returns is sensible, but if we're setting which messages to re-transmit
+ // on monitor update success, we need to make sure the order is sane.
+ let mut order = $order;
+ if $raa.is_none() {
+ order = RAACommitmentOrder::CommitmentFirst;
+ }
+ break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
+ }
+ }
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = $commitment_update {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: counterparty_node_id,
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = $raa {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: counterparty_node_id,
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match $order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+ if let Some(tx) = funding_broadcastable {
+ log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+ $self.tx_broadcaster.broadcast_transaction(&tx);
+ }
+ break Ok(());
+ };
+
+ if chanmon_update_is_none {
+ // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
+ // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
+ // should *never* end up calling back to `chain_monitor.update_channel()`.
+ assert!(res.is_ok());
+ }
+
+ (htlc_forwards, res, counterparty_node_id)
+ } }
+}
+
+macro_rules! post_handle_chan_restoration {
+ ($self: ident, $locked_res: expr) => { {
+ let (htlc_forwards, res, counterparty_node_id) = $locked_res;
+
+ let _ = handle_error!($self, res, counterparty_node_id);
+
+ if let Some(forwards) = htlc_forwards {
+ $self.forward_htlcs(&mut [forwards][..]);
+ }
+ } }
+}
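+
+// The two macros above are used as a pair: `handle_chan_restoration_locked!` runs
+// while the channel_state lock is held, and its result must be fed to
+// `post_handle_chan_restoration!` only after the lock has been dropped (the latter
+// may take locks itself when handling errors and forwarding HTLCs). A sketch:
+//
+//   let res = handle_chan_restoration_locked!(self, channel_lock, channel_state,
+//       chan_entry, raa, commitment_update, order, chanmon_update, pending_forwards,
+//       funding_broadcastable, funding_locked);
+//   drop(channel_lock);
+//   post_handle_chan_restoration!(self, res);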
+
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
chain_monitor,
tx_broadcaster,
- latest_block_height: AtomicUsize::new(params.latest_height),
- last_block_hash: RwLock::new(params.latest_hash),
- secp_ctx,
+ best_block: RwLock::new(params.best_block),
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
}),
+ pending_inbound_payments: Mutex::new(HashMap::new()),
+ pending_outbound_payments: Mutex::new(HashSet::new()),
+
our_network_key: keys_manager.get_node_secret(),
+ our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
+ secp_ctx,
last_node_announcement_serial: AtomicUsize::new(0),
+ highest_seen_timestamp: AtomicUsize::new(0),
per_peer_state: RwLock::new(HashMap::new()),
}
}
+ /// Gets the current configuration applied to all new channels.
+ pub fn get_current_default_configuration(&self) -> &UserConfig {
+ &self.default_configuration
+ }
+
/// Creates a new outbound channel to the given remote node and with the given value.
///
- /// user_id will be provided back as user_channel_id in FundingGenerationReady and
- /// FundingBroadcastSafe events to allow tracking of which events correspond with which
- /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
- /// may wish to avoid using 0 for user_id here.
+ /// user_id will be provided back as user_channel_id in FundingGenerationReady events to allow
+ /// tracking of which events correspond with which create_channel call. Note that the
+ /// user_channel_id defaults to 0 for inbound channels, so you may wish to avoid using 0 for
+ /// user_id here. user_id has no meaning inside of LDK, it is simply copied to events and
+ /// otherwise ignored.
///
/// If successful, will generate a SendOpenChannel message event, so you should probably poll
/// PeerManager::process_events afterwards.
let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
let res = channel.get_open_channel(self.genesis_hash.clone());
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
debug_assert!(&self.total_consistency_lock.try_write().is_err());
let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
+ funding_txo: channel.get_funding_txo(),
short_channel_id: channel.get_short_channel_id(),
remote_network_id: channel.get_counterparty_node_id(),
counterparty_features: InitFeatures::empty(),
inbound_capacity_msat,
outbound_capacity_msat,
user_id: channel.get_user_id(),
- is_live: channel.is_live(),
+ is_outbound: channel.is_outbound(),
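+ // Note the nomenclature shift here: the Channel's internal is_usable() maps to
+ // ChannelDetails::is_funding_locked, and its is_live() to ChannelDetails::is_usable.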
+ is_funding_locked: channel.is_usable(),
+ is_usable: channel.is_live(),
+ is_public: channel.should_announce(),
counterparty_forwarding_info: channel.counterparty_forwarding_info(),
});
}
/// Gets the list of usable channels, in random order. Useful as an argument to
/// get_route to ensure non-announced channels are used.
///
- /// These are guaranteed to have their is_live value set to true, see the documentation for
- /// ChannelDetails::is_live for more info on exactly what the criteria are.
+ /// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
+ /// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
+ /// are.
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
// Note we use is_live here instead of usable which leads to somewhat confused
// internal/external nomenclature, but that's ok because that's probably what the user
///
/// May generate a SendShutdown message event on success, which should be relayed.
pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let (mut failed_htlcs, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
}
}
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
if let Some(node_id) = peer_node_id {
if chan.get().get_counterparty_node_id() != *node_id {
- // Error or Ok here doesn't matter - the result is only exposed publicly
- // when peer_node_id is None anyway.
- return Ok(());
+ return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
}
if let Some(short_id) = chan.get().get_short_channel_id() {
});
}
- Ok(())
+ Ok(chan.get_counterparty_node_id())
}
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- self.force_close_channel_with_peer(channel_id, None)
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ match self.force_close_channel_with_peer(channel_id, None) {
+ Ok(counterparty_node_id) => {
+ self.channel_state.lock().unwrap().pending_msg_events.push(
+ events::MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+ },
+ }
+ );
+ Ok(())
+ },
+ Err(e) => Err(e)
+ }
}
/// Force close all channels, immediately broadcasting the latest local commitment transaction
// HTLC_FAIL_BACK_BUFFER blocks to go.
// Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
// before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
- if (msg.cltv_expiry as u64) <= self.latest_block_height.load(Ordering::Acquire) as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+ if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
}
// final_incorrect_htlc_amount
msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
};
+ if payment_data.is_none() {
+ return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
+ }
+
// Note that we could obviously respond immediately with an update_fulfill_htlc
// message, however that would leak that we are the recipient of this payment, so
// instead we stay symmetric with the forwarding case, only responding (after a
PendingHTLCStatus::Forward(PendingHTLCInfo {
routing: PendingHTLCRouting::Receive {
- payment_data,
+ payment_data: payment_data.unwrap(),
incoming_cltv_expiry: msg.cltv_expiry,
},
payment_hash: msg.payment_hash.clone(),
if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
}
- let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let cur_height = self.best_block.read().unwrap().height() + 1;
// Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
// packet sanitization (see HTLC_FAIL_BACK_BUFFER rationale)
if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
let prng_seed = self.keys_manager.get_secure_random_bytes();
- let session_priv = SecretKey::from_slice(&self.keys_manager.get_secure_random_bytes()[..]).expect("RNG is busted");
+ let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
+ let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
}
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
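+ // Track this payment's session_priv bytes so that duplicate fulfill/fail events
+ // (e.g. replayed after a reconnect or while replaying blocks on restart) generate
+ // only a single PaymentSent/PaymentFailed event; see pending_outbound_payments.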
+ assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes));
let err: Result<(), _> = loop {
let mut channel_lock = self.channel_state.lock().unwrap();
return Err(PaymentSendFailure::PathParameterError(path_errs));
}
- let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let cur_height = self.best_block.read().unwrap().height() + 1;
let mut results = Vec::new();
for path in route.paths.iter() {
results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
}
}
- /// Call this upon creation of a funding transaction for the given channel.
- ///
- /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
- /// or your counterparty can steal your funds!
- ///
- /// Panics if a funding transaction has already been provided for this channel.
- ///
- /// May panic if the funding_txo is duplicative with some other channel (note that this should
- /// be trivially prevented by using unique funding transaction keys per-channel).
- pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-
+ /// Handles the generation of a funding transaction, optionally (for tests) with a function
+ /// which checks the correctness of the funding transaction given the associated channel.
+ fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>
+ (&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, find_funding_output: FundingOutput) -> Result<(), APIError> {
let (chan, msg) = {
let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
Some(mut chan) => {
- (chan.get_outbound_funding_created(funding_txo, &self.logger)
+ let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+ (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
.map_err(|e| if let ChannelError::Close(msg) = e {
MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
} else { unreachable!(); })
, chan)
},
- None => return
+ None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
};
match handle_error!(self, res, chan.get_counterparty_node_id()) {
Ok(funding_msg) => {
(chan, funding_msg)
},
- Err(_) => { return; }
+ Err(_) => { return Err(APIError::ChannelUnavailable {
+ err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+ }) },
}
};
e.insert(chan);
}
}
+ Ok(())
+ }
+
+ #[cfg(test)]
+ pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
+ self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |_, tx| {
+ Ok(OutPoint { txid: tx.txid(), index: output_index })
+ })
+ }
+
+ /// Call this upon creation of a funding transaction for the given channel.
+ ///
+ /// Returns an [`APIError::APIMisuseError`] if the funding_transaction spends non-SegWit outputs
+ /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
+ ///
+ /// Panics if a funding transaction has already been provided for this channel.
+ ///
+ /// May panic if the output found in the funding transaction is duplicative with some other
+ /// channel (note that this should be trivially prevented by using unique funding transaction
+ /// keys per-channel).
+ ///
+ /// Do NOT broadcast the funding transaction yourself. When we have safely received our
+ /// counterparty's signature the funding transaction will automatically be broadcast via the
+ /// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
+ ///
+ /// Note that this includes RBF or similar transaction replacement strategies - lightning does
+ /// not currently support replacing a funding transaction on an existing channel. Instead,
+ /// create a new channel with a conflicting funding transaction.
+ ///
+ /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
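+ ///
+ /// A sketch of typical usage (illustrative; error handling omitted):
+ ///
+ /// ```ignore
+ /// // In an Event::FundingGenerationReady handler: construct and fully sign a
+ /// // transaction with an output paying channel_value_satoshis to output_script,
+ /// // do NOT broadcast it, and hand it back:
+ /// channel_manager.funding_transaction_generated(&temporary_channel_id, funding_tx)?;
+ /// ```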
+ pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+
+ for inp in funding_transaction.input.iter() {
+ if inp.witness.is_empty() {
+ return Err(APIError::APIMisuseError {
+ err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
+ });
+ }
+ }
+ self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |chan, tx| {
+ let mut output_index = None;
+ let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
+ for (idx, outp) in tx.output.iter().enumerate() {
+ if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+ if output_index.is_some() {
+ return Err(APIError::APIMisuseError {
+ err: "Multiple outputs matched the expected script and value".to_owned()
+ });
+ }
+ if idx > u16::max_value() as usize {
+ return Err(APIError::APIMisuseError {
+ err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+ });
+ }
+ output_index = Some(idx as u16);
+ }
+ }
+ if output_index.is_none() {
+ return Err(APIError::APIMisuseError {
+ err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
+ });
+ }
+ Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() })
+ })
}
fn get_announcement_sigs(&self, chan: &Channel<Signer>) -> Option<msgs::AnnouncementSignatures> {
// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
- const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+ const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
/// only Tor Onion addresses.
///
/// Panics if addresses is absurdly large (more than 500).
- pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
if addresses.len() > 500 {
panic!("More than half the message size was taken up by public addresses!");
}
+ // While all existing nodes handle unsorted addresses just fine, the spec requires that
+ // addresses be sorted for future compatibility.
+ addresses.sort_by_key(|addr| addr.get_id());
+
let announcement = msgs::UnsignedNodeAnnouncement {
features: NodeFeatures::known(),
timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
/// Will likely generate further events.
pub fn process_pending_htlc_forwards(&self) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut new_events = Vec::new();
let mut failed_forwards = Vec::new();
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
- match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+ match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
Err(e) => {
if let ChannelError::Ignore(msg) = e {
log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
incoming_shared_secret, payment_hash, amt_to_forward, .. },
prev_funding_outpoint } => {
- let prev_hop = HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- };
-
- let mut total_value = 0;
- let payment_secret_opt =
- if let &Some(ref data) = &payment_data { Some(data.payment_secret.clone()) } else { None };
- let htlcs = channel_state.claimable_htlcs.entry((payment_hash, payment_secret_opt))
- .or_insert(Vec::new());
- htlcs.push(ClaimableHTLC {
- prev_hop,
+ let claimable_htlc = ClaimableHTLC {
+ prev_hop: HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ },
value: amt_to_forward,
payment_data: payment_data.clone(),
cltv_expiry: incoming_cltv_expiry,
- });
- if let &Some(ref data) = &payment_data {
- for htlc in htlcs.iter() {
- total_value += htlc.value;
- if htlc.payment_data.as_ref().unwrap().total_msat != data.total_msat {
- total_value = msgs::MAX_VALUE_MSAT;
- }
- if total_value >= msgs::MAX_VALUE_MSAT { break; }
- }
- if total_value >= msgs::MAX_VALUE_MSAT || total_value > data.total_msat {
- for htlc in htlcs.iter() {
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(
- &byte_utils::be32_to_array(
- self.latest_block_height.load(Ordering::Acquire)
- as u32,
- ),
- );
- failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: htlc.prev_hop.short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: htlc.prev_hop.htlc_id,
- incoming_packet_shared_secret: htlc.prev_hop.incoming_packet_shared_secret,
- }), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
- ));
- }
- } else if total_value == data.total_msat {
- new_events.push(events::Event::PaymentReceived {
- payment_hash,
- payment_secret: Some(data.payment_secret),
- amt: total_value,
- });
+ };
+
+ macro_rules! fail_htlc {
+ ($htlc: expr) => {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(
+ &byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
+ );
+ failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: $htlc.prev_hop.short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: $htlc.prev_hop.htlc_id,
+ incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
+ }), payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+ ));
}
- } else {
- new_events.push(events::Event::PaymentReceived {
- payment_hash,
- payment_secret: None,
- amt: amt_to_forward,
- });
}
+
+ // Check that the payment hash and secret are known. Note that we
+ // MUST take care to handle the "unknown payment hash" and
+ // "incorrect payment secret" cases here identically or we'd expose
+ // that we are the ultimate recipient of the given payment hash.
+ // Further, we must not expose whether we have any other HTLCs
+ // associated with the same payment_hash pending or not.
+ let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
+ match payment_secrets.entry(payment_hash) {
+ hash_map::Entry::Vacant(_) => {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ },
+ hash_map::Entry::Occupied(inbound_payment) => {
+ if inbound_payment.get().payment_secret != payment_data.payment_secret {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
+ log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
+ fail_htlc!(claimable_htlc);
+ } else {
+ let mut total_value = 0;
+ let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
+ .or_insert(Vec::new());
+ htlcs.push(claimable_htlc);
+ for htlc in htlcs.iter() {
+ total_value += htlc.value;
+ if htlc.payment_data.total_msat != payment_data.total_msat {
+ log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+ log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat);
+ total_value = msgs::MAX_VALUE_MSAT;
+ }
+ if total_value >= msgs::MAX_VALUE_MSAT { break; }
+ }
+ if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat {
+ log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
+ log_bytes!(payment_hash.0), total_value, payment_data.total_msat);
+ for htlc in htlcs.iter() {
+ fail_htlc!(htlc);
+ }
+ } else if total_value == payment_data.total_msat {
+ new_events.push(events::Event::PaymentReceived {
+ payment_hash,
+ payment_preimage: inbound_payment.get().payment_preimage,
+ payment_secret: payment_data.payment_secret,
+ amt: total_value,
+ user_payment_id: inbound_payment.get().user_payment_id,
+ });
+ // Only ever generate at most one PaymentReceived
+ // per registered payment_hash, even if it isn't
+ // claimed.
+ inbound_payment.remove_entry();
+ } else {
+ // Nothing to do - we haven't reached the total
+ // payment value yet, wait until we receive more
+ // MPP parts.
+ }
+ }
+ },
+ };
},
HTLCForwardInfo::AddHTLC { .. } => {
panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
events.append(&mut new_events);
}
- /// Free the background events, generally called from timer_chan_freshness_every_min.
+ /// Free the background events, generally called from timer_tick_occurred.
///
/// Exposed for testing to allow us to process events quickly without generating accidental
- /// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+ /// BroadcastChannelUpdate events in timer_tick_occurred.
///
/// Expects the caller to have a total_consistency_lock read lock.
- fn process_background_events(&self) {
+ fn process_background_events(&self) -> bool {
let mut background_events = Vec::new();
mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
+ if background_events.is_empty() {
+ return false;
+ }
+
for event in background_events.drain(..) {
match event {
BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
},
}
}
+ true
}
#[cfg(any(test, feature = "_test_utils"))]
/// This method handles all the details, and must be called roughly once per minute.
///
/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
- pub fn timer_chan_freshness_every_min(&self) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- self.process_background_events();
+ pub fn timer_tick_occurred(&self) {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut should_persist = NotifyOption::SkipPersist;
+ if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- for (_, chan) in channel_state.by_id.iter_mut() {
- if chan.is_disabled_staged() && !chan.is_live() {
- if let Ok(update) = self.get_channel_update(&chan) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ for (_, chan) in channel_state.by_id.iter_mut() {
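+ // Channel liveness drives a two-tick state machine: a channel must stay
+ // disconnected (or reconnected) across two consecutive timer ticks before
+ // we broadcast a disabling (or enabling) channel_update, avoiding gossip
+ // churn from brief disconnections.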
+ match chan.channel_update_status() {
+ ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
+ ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
+ ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+ ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+ ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+ },
+ ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+ },
+ _ => {},
}
- chan.to_fresh();
- } else if chan.is_disabled_staged() && chan.is_live() {
- chan.to_fresh();
- } else if chan.is_disabled_marked() {
- chan.to_disabled_staged();
}
- }
+
+ should_persist
+ });
}
/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
/// along the path (including in our own channel on which we received it).
/// Returns false if no payment was found to fail backwards, true if the process of failing the
/// HTLC backwards has been started.
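+ ///
+ /// A rejection sketch (assumes `channel_manager` and a `payment_hash` taken from a
+ /// previously-received `PaymentReceived` event):
+ ///
+ /// ```ignore
+ /// // We cannot or do not wish to accept this payment, so fail it back.
+ /// if !channel_manager.fail_htlc_backwards(&payment_hash) {
+ ///     // No pending HTLCs matched this hash; nothing was failed.
+ /// }
+ /// ```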
- pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>) -> bool {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(*payment_hash, *payment_secret));
+ let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
if let Some(mut sources) = removed_source {
for htlc in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
- self.latest_block_height.load(Ordering::Acquire) as u32,
- ));
+ self.best_block.read().unwrap().height()));
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
self.fail_htlc_backwards_internal(channel_state,
htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
},
- HTLCSource::OutboundRoute { .. } => {
- self.pending_events.lock().unwrap().push(
- events::Event::PaymentFailed {
- payment_hash,
- rejected_by_dest: false,
+ HTLCSource::OutboundRoute { session_priv, .. } => {
+ if {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
+ } {
+ self.pending_events.lock().unwrap().push(
+ events::Event::PaymentFailed {
+ payment_hash,
+ rejected_by_dest: false,
#[cfg(test)]
- error_code: None,
+ error_code: None,
#[cfg(test)]
- error_data: None,
- }
- )
+ error_data: None,
+ }
+ )
+ } else {
+ log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
+ }
},
};
}
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
- HTLCSource::OutboundRoute { ref path, .. } => {
+ HTLCSource::OutboundRoute { ref path, session_priv, .. } => {
+ if {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
+ } {
+ log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
+ return;
+ }
log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
mem::drop(channel_state_lock);
match &onion_error {
/// generating message events for the net layer to claim the payment, if possible. Thus, you
/// should probably kick the net layer to go send messages if this returns true!
///
- /// You must specify the expected amounts for this HTLC, and we will only claim HTLCs
- /// available within a few percent of the expected amount. This is critical for several
- /// reasons : a) it avoids providing senders with `proof-of-payment` (in the form of the
- /// payment_preimage without having provided the full value and b) it avoids certain
- /// privacy-breaking recipient-probing attacks which may reveal payment activity to
- /// motivated attackers.
- ///
- /// Note that the privacy concerns in (b) are not relevant in payments with a payment_secret
- /// set. Thus, for such payments we will claim any payments which do not under-pay.
+ /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
+ /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
+ /// event matches your expectation. If you fail to do so and call this method, you may provide
+ /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
/// May panic if called except in response to a PaymentReceived event.
- pub fn claim_funds(&self, payment_preimage: PaymentPreimage, payment_secret: &Option<PaymentSecret>, expected_amount: u64) -> bool {
+ ///
+ /// [`create_inbound_payment`]: Self::create_inbound_payment
+ /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
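+ ///
+ /// A claiming sketch (hypothetical `expected_amt_msat`; the amount check matters when no
+ /// `min_value_msat` was set at registration, per the note above):
+ ///
+ /// ```ignore
+ /// // In response to a PaymentReceived { payment_hash, payment_preimage, amt, .. } event:
+ /// if amt >= expected_amt_msat {
+ ///     if let Some(preimage) = payment_preimage {
+ ///         channel_manager.claim_funds(preimage);
+ ///     }
+ /// } else {
+ ///     channel_manager.fail_htlc_backwards(&payment_hash);
+ /// }
+ /// ```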
+ pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(payment_hash, *payment_secret));
+ let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
if let Some(mut sources) = removed_source {
assert!(!sources.is_empty());
// we got all the HTLCs and then a channel closed while we were waiting for the user to
// provide the preimage, so worrying too much about the optimal handling isn't worth
// it.
-
- let (is_mpp, mut valid_mpp) = if let &Some(ref data) = &sources[0].payment_data {
- assert!(payment_secret.is_some());
- (true, data.total_msat >= expected_amount)
- } else {
- assert!(payment_secret.is_none());
- (false, false)
- };
-
+ let mut valid_mpp = true;
for htlc in sources.iter() {
- if !is_mpp || !valid_mpp { break; }
if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
valid_mpp = false;
+ break;
}
}
let mut errs = Vec::new();
let mut claimed_any_htlcs = false;
for htlc in sources.drain(..) {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- if (is_mpp && !valid_mpp) || (!is_mpp && (htlc.value < expected_amount || htlc.value > expected_amount * 2)) {
+ if !valid_mpp {
+ if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
- self.latest_block_height.load(Ordering::Acquire) as u32,
- ));
+ self.best_block.read().unwrap().height()));
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
claimed_any_htlcs = true;
} else { errs.push(e); }
},
- Err(None) if is_mpp => unreachable!("We already checked for channel existence, we can't fail here!"),
- Err(None) => {
- log_warn!(self.logger, "Channel we expected to claim an HTLC from was closed.");
- },
+ Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
Ok(()) => claimed_any_htlcs = true,
}
}
fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
match source {
- HTLCSource::OutboundRoute { .. } => {
+ HTLCSource::OutboundRoute { session_priv, .. } => {
mem::drop(channel_state_lock);
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentSent {
- payment_preimage
- });
+ if {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
+ } {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PaymentSent {
+ payment_preimage
+ });
+ } else {
+ log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
+ }
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
/// Gets the node_id held by this ChannelManager
pub fn get_our_node_id(&self) -> PublicKey {
- PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
+ self.our_network_pubkey.clone()
}
/// Restores a single, given channel to normal operation after a
/// 4) once all remote copies are updated, you call this function with the update_id that
/// completed, and once it is the latest the Channel will be re-enabled.
pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut close_results = Vec::new();
- let mut htlc_forwards = Vec::new();
- let mut htlc_failures = Vec::new();
- let mut pending_events = Vec::new();
-
- {
+ let (mut pending_failures, chan_restoration_res) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
- Some(chan) => chan,
- None => return,
+ let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
+ hash_map::Entry::Occupied(chan) => chan,
+ hash_map::Entry::Vacant(_) => return,
};
- if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
+ if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
return;
}
- let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(&self.logger);
- if !pending_forwards.is_empty() {
- htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
+ let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
+ (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
+ };
+ post_handle_chan_restoration!(self, chan_restoration_res);
+ for failure in pending_failures.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ }
+ }
+
+ fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+ if msg.chain_hash != self.genesis_hash {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
+ }
+
+ let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration)
+ .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ match channel_state.by_id.entry(channel.channel_id()) {
+ hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone())),
+ hash_map::Entry::Vacant(entry) => {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: counterparty_node_id.clone(),
+ msg: channel.get_accept_channel(),
+ });
+ entry.insert(channel);
}
- htlc_failures.append(&mut pending_failures);
-
- macro_rules! handle_cs { () => {
- if let Some(update) = commitment_update {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: channel.get_counterparty_node_id(),
- updates: update,
- });
- }
- } }
- macro_rules! handle_raa { () => {
- if let Some(revoke_and_ack) = raa {
- pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: channel.get_counterparty_node_id(),
- msg: revoke_and_ack,
- });
- }
- } }
- match order {
- RAACommitmentOrder::CommitmentFirst => {
- handle_cs!();
- handle_raa!();
- },
- RAACommitmentOrder::RevokeAndACKFirst => {
- handle_raa!();
- handle_cs!();
- },
- }
- if needs_broadcast_safe {
- pending_events.push(events::Event::FundingBroadcastSafe {
- funding_txo: channel.get_funding_txo().unwrap(),
- user_channel_id: channel.get_user_id(),
- });
- }
- if let Some(msg) = funding_locked {
- pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
- node_id: channel.get_counterparty_node_id(),
- msg,
- });
- if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
- pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: channel.get_counterparty_node_id(),
- msg: announcement_sigs,
- });
- }
- short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
- }
- }
-
- self.pending_events.lock().unwrap().append(&mut pending_events);
-
- for failure in htlc_failures.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
- }
- self.forward_htlcs(&mut htlc_forwards[..]);
-
- for res in close_results.drain(..) {
- self.finish_force_close_channel(res);
- }
- }
-
- fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
- if msg.chain_hash != self.genesis_hash {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
- }
-
- let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration)
- .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- match channel_state.by_id.entry(channel.channel_id()) {
- hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone())),
- hash_map::Entry::Vacant(entry) => {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: counterparty_node_id.clone(),
- msg: channel.get_accept_channel(),
- });
- entry.insert(channel);
- }
- }
- Ok(())
- }
+ }
+ Ok(())
+ }
fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
let (value, output_script, user_id) = {
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
let ((funding_msg, monitor), mut chan) = {
- let last_block_hash = *self.last_block_hash.read().unwrap();
+ let best_block = *self.best_block.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- (try_chan_entry!(self, chan.get_mut().funding_created(msg, last_block_hash, &self.logger), channel_state, chan), chan.remove())
+ (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
}
}
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
- let (funding_txo, user_id) = {
- let last_block_hash = *self.last_block_hash.read().unwrap();
+ let funding_tx = {
+ let best_block = *self.best_block.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.channel_id) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let monitor = match chan.get_mut().funding_signed(&msg, last_block_hash, &self.logger) {
+ let (monitor, funding_tx) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
Ok(update) => update,
Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
};
if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
}
- (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
+ funding_tx
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::FundingBroadcastSafe {
- funding_txo,
- user_channel_id: user_id,
- });
+ log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
+ self.tx_broadcaster.broadcast_transaction(&funding_tx);
Ok(())
}
}
};
if let Some(broadcast_tx) = tx {
- log_trace!(self.logger, "Broadcast onchain {}", log_tx!(broadcast_tx));
+ log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
}
if let Some(chan) = chan_option {
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
+ let (htlcs_failed_forward, chan_restoration_res) = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
- match channel_state.by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- // Currently, we expect all holding cell update_adds to be dropped on peer
- // disconnect, so Channel's reestablish will never hand us any holding cell
- // freed HTLCs to fail backwards. If in the future we no longer drop pending
- // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
- let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
- try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
- if let Some(monitor_update) = monitor_update_opt {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- if revoke_and_ack.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
- }
- if commitment_update.is_none() {
- order = RAACommitmentOrder::RevokeAndACKFirst;
- }
- return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
- //TODO: Resend the funding_locked if needed once we get the monitor running again
- }
- }
- if let Some(msg) = funding_locked {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
- node_id: counterparty_node_id.clone(),
- msg
- });
- }
- macro_rules! send_raa { () => {
- if let Some(msg) = revoke_and_ack {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: counterparty_node_id.clone(),
- msg
- });
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- } }
- macro_rules! send_cu { () => {
- if let Some(updates) = commitment_update {
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ // Currently, we expect all holding cell update_adds to be dropped on peer
+ // disconnect, so Channel's reestablish will never hand us any holding cell
+ // freed HTLCs to fail backwards. If in the future we no longer drop pending
+ // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+ let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
+ try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+ if let Some(msg) = shutdown {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: counterparty_node_id.clone(),
- updates
+ msg,
});
}
- } }
- match order {
- RAACommitmentOrder::RevokeAndACKFirst => {
- send_raa!();
- send_cu!();
- },
- RAACommitmentOrder::CommitmentFirst => {
- send_cu!();
- send_raa!();
- },
- }
- if let Some(msg) = shutdown {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- Ok(())
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
+ (htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+ }
+ };
+ post_handle_chan_restoration!(self, chan_restoration_res);
+ self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+ Ok(())
}
/// Begins the update_fee process. Allowed only on an outbound channel.
/// (C-not exported) because it's doc(hidden) anyway
#[doc(hidden)]
pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let counterparty_node_id;
let err: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
}
}
- /// Process pending events from the `chain::Watch`.
- fn process_pending_monitor_events(&self) {
+ /// Process pending events from the `chain::Watch`, returning whether any events were processed.
+ fn process_pending_monitor_events(&self) -> bool {
let mut failed_channels = Vec::new();
+ let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+ let has_pending_monitor_events = !pending_monitor_events.is_empty();
+ for monitor_event in pending_monitor_events {
+ match monitor_event {
+ MonitorEvent::HTLCEvent(htlc_update) => {
+ if let Some(preimage) = htlc_update.payment_preimage {
+ log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+ } else {
+ log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ },
+ MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ failed_channels.push(chan.force_shutdown(false));
+ if let Ok(update) = self.get_channel_update(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ },
+ });
+ }
+ },
+ }
+ }
+
+ for failure in failed_channels.drain(..) {
+ self.finish_force_close_channel(failure);
+ }
+
+ has_pending_monitor_events
+ }
+
+ /// Checks the holding cell in each channel and frees any pending HTLCs in them if possible.
+ /// Returns whether any updates occurred, i.e. whether pending HTLCs were freed or a monitor
+ /// update was applied.
+ ///
+ /// This should only apply to HTLCs which were added to the holding cell because we were
+ /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
+ /// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
+ /// code to inform them of a channel monitor update.
+ fn check_free_holding_cells(&self) -> bool {
+ let mut has_monitor_update = false;
+ let mut failed_htlcs = Vec::new();
+ let mut handle_errors = Vec::new();
{
- for monitor_event in self.chain_monitor.release_pending_monitor_events() {
- match monitor_event {
- MonitorEvent::HTLCEvent(htlc_update) => {
- if let Some(preimage) = htlc_update.payment_preimage {
- log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
- } else {
- log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+
+ by_id.retain(|channel_id, chan| {
+ match chan.maybe_free_holding_cell_htlcs(&self.logger) {
+ Ok((commitment_opt, holding_cell_failed_htlcs)) => {
+ if !holding_cell_failed_htlcs.is_empty() {
+ failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
}
- },
- MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
- let by_id = &mut channel_state.by_id;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
- if let Some(short_id) = chan.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- failed_channels.push(chan.force_shutdown(false));
- if let Ok(update) = self.get_channel_update(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
+ if let Some((commitment_update, monitor_update)) = commitment_opt {
+ has_monitor_update = true;
+ if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), res));
+ if close_channel { return false; }
+ } else {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: commitment_update,
});
}
}
+ true
},
+ Err(e) => {
+ let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ !close_channel
+ }
}
- }
+ });
}
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
+ let has_update = has_monitor_update || !failed_htlcs.is_empty();
+ for (failures, channel_id) in failed_htlcs.drain(..) {
+ self.fail_holding_cell_htlcs(failures, channel_id);
}
+
+ for (counterparty_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
+ }
+
+ has_update
}
/// Handle a list of channel failures during a block_connected or block_disconnected call,
// We cannot broadcast our latest local state via monitor update (as
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
- // timer_chan_freshness_every_min, guaranteeing we're running normally.
+ // timer_tick_occurred, guaranteeing we're running normally.
if let Some((funding_txo, update)) = failure.0.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
self.finish_force_close_channel(failure);
}
}
+
+ fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
+ assert!(invoice_expiry_delta_secs <= 60*60*24*365); // Sadly bitcoin timestamps are u32s, so panic before 2106
+
+ let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
+
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
+ match payment_secrets.entry(payment_hash) {
+ hash_map::Entry::Vacant(e) => {
+ e.insert(PendingInboundPayment {
+ payment_secret, min_value_msat, user_payment_id, payment_preimage,
+ // We assume that highest_seen_timestamp is pretty close to the current time -
+ // it's updated when we receive a new block with the maximum time we've seen in
+ // a header. It should never be more than two hours in the future.
+ // Thus, we add two hours here as a buffer to ensure we absolutely
+ // never fail a payment too early.
+ // Note that we assume that received blocks have reasonably up-to-date
+ // timestamps.
+ expiry_time: self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + invoice_expiry_delta_secs as u64 + 7200,
+ });
+ },
+ hash_map::Entry::Occupied(_) => return Err(APIError::APIMisuseError { err: "Duplicate payment hash".to_owned() }),
+ }
+ Ok(payment_secret)
+ }
+
+ /// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
+ /// to pay us.
+ ///
+ /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
+ /// [`PaymentHash`] and [`PaymentPreimage`] for you, returning the first and storing the second.
+ ///
+ /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentReceived`]
+ /// event, which will have the [`PaymentReceived::payment_preimage`] field filled in. That
+ /// preimage should then be passed directly to [`claim_funds`].
+ ///
+ /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
+ ///
+ /// [`claim_funds`]: Self::claim_funds
+ /// [`PaymentReceived`]: events::Event::PaymentReceived
+ /// [`PaymentReceived::payment_preimage`]: events::Event::PaymentReceived::payment_preimage
+ /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
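+ ///
+ /// An invoice-generation sketch (values illustrative):
+ ///
+ /// ```ignore
+ /// // Require at least 10_000 msat, valid for one hour, correlated via user id 42.
+ /// let (payment_hash, payment_secret) =
+ ///     channel_manager.create_inbound_payment(Some(10_000), 3600, 42);
+ /// // Embed both values in the invoice handed to the payer.
+ /// ```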
+ pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> (PaymentHash, PaymentSecret) {
+ let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes());
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+
+ (payment_hash,
+ self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs, user_payment_id)
+ .expect("RNG Generated Duplicate PaymentHash"))
+ }
+
+ /// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
+ /// stored external to LDK.
+ ///
+ /// A [`PaymentReceived`] event will only be generated if the [`PaymentSecret`] matches a
+ /// payment secret fetched via this method or [`create_inbound_payment`], and which is at least
+ /// the `min_value_msat` provided here, if one is provided.
+ ///
+ /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
+ /// method may return an Err if another payment with the same payment_hash is still pending.
+ ///
+ /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to
+ /// allow tracking of which events correspond with which calls to this and
+ /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply
+ /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
+ /// with invoice metadata stored elsewhere.
+ ///
+ /// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
+ /// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
+ /// before a [`PaymentReceived`] event will be generated, ensuring that we do not provide the
+ /// sender "proof-of-payment" unless they have paid the required amount.
+ ///
+ /// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
+ /// in excess of the current time. This should roughly match the expiry time set in the invoice.
+ /// After this many seconds, we will remove the inbound payment, resulting in any attempts to
+ /// pay the invoice failing. The BOLT spec suggests 3,600 secs as a default validity time for
+ /// invoices when no timeout is set.
+ ///
+ /// Note that we use block header time to time-out pending inbound payments (with some margin
+ /// to compensate for the inaccuracy of block header timestamps). Thus, in practice we may
+ /// still accept a payment and generate a [`PaymentReceived`] event for some time after the
+ /// expiry.
+ /// If you need exact expiry semantics, you should enforce them upon receipt of
+ /// [`PaymentReceived`].
+ ///
+ /// Pending inbound payments are stored in memory and in serialized versions of this
+ /// [`ChannelManager`]. If potentially unbounded numbers of inbound payments may exist and
+ /// space is limited, you may wish to rate-limit inbound payment creation.
+ ///
+ /// May panic if `invoice_expiry_delta_secs` is greater than one year.
+ ///
+ /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry`
+ /// set to at least [`MIN_FINAL_CLTV_EXPIRY`].
+ ///
+ /// [`create_inbound_payment`]: Self::create_inbound_payment
+ /// [`PaymentReceived`]: events::Event::PaymentReceived
+ /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id
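+ ///
+ /// A sketch with an externally held preimage (names illustrative):
+ ///
+ /// ```ignore
+ /// let payment_hash = PaymentHash(Sha256::hash(&externally_stored_preimage.0).into_inner());
+ /// let payment_secret = channel_manager
+ ///     .create_inbound_payment_for_hash(payment_hash, Some(10_000), 3600, 42)?;
+ /// ```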
+ pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
+ self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
+ }
+
+ #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+ pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ let events = RefCell::new(Vec::new());
+ let event_handler = |event| events.borrow_mut().push(event);
+ self.process_pending_events(&event_handler);
+ events.into_inner()
+ }
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
L::Target: Logger,
{
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
- //TODO: This behavior should be documented. It's non-intuitive that we query
- // ChannelMonitors when clearing other events.
- self.process_pending_monitor_events();
+ let events = RefCell::new(Vec::new());
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut result = NotifyOption::SkipPersist;
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
- let mut ret = Vec::new();
- let mut channel_state = self.channel_state.lock().unwrap();
- mem::swap(&mut ret, &mut channel_state.pending_msg_events);
- ret
+ if self.check_free_holding_cells() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let mut pending_events = Vec::new();
+ let mut channel_state = self.channel_state.lock().unwrap();
+ mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+
+ if !pending_events.is_empty() {
+ events.replace(pending_events);
+ }
+
+ result
+ });
+ events.into_inner()
}
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
- T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
- F::Target: FeeEstimator,
- L::Target: Logger,
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- fn get_and_clear_pending_events(&self) -> Vec<Event> {
- //TODO: This behavior should be documented. It's non-intuitive that we query
- // ChannelMonitors when clearing other events.
- self.process_pending_monitor_events();
+ /// Processes events that must be periodically handled.
+ ///
+ /// An [`EventHandler`] may safely call back to the provider in order to handle an event.
+ /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
+ ///
+ /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
+ /// when processed, an [`EventHandler`] must be able to handle previously seen events when
+ /// restarting from an old state.
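+ ///
+ /// A handler sketch, assuming closures implementing [`EventHandler`] (as the test helpers
+ /// in this file do):
+ ///
+ /// ```ignore
+ /// channel_manager.process_pending_events(&|event| match event {
+ ///     events::Event::PaymentReceived { payment_preimage: Some(preimage), .. } => {
+ ///         // Calling back into the provider is safe here.
+ ///         channel_manager.claim_funds(preimage);
+ ///     },
+ ///     _ => { /* queue or handle other events */ },
+ /// });
+ /// ```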
+ fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut result = NotifyOption::SkipPersist;
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
- let mut ret = Vec::new();
- let mut pending_events = self.pending_events.lock().unwrap();
- mem::swap(&mut ret, &mut *pending_events);
- ret
+ let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
+
+ for event in pending_events.drain(..) {
+ handler.handle_event(event);
+ }
+
+ result
+ });
}
}
L::Target: Logger,
{
fn block_connected(&self, block: &Block, height: u32) {
+ {
+ let best_block = self.best_block.read().unwrap();
+ assert_eq!(best_block.block_hash(), block.header.prev_blockhash,
+ "Blocks must be connected in chain-order - the connected header must build on the last connected header");
+ assert_eq!(best_block.height(), height - 1,
+ "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
+ }
+
let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
- ChannelManager::block_connected(self, &block.header, &txdata, height);
+ self.transactions_confirmed(&block.header, &txdata, height);
+ self.best_block_updated(&block.header, height);
}
- fn block_disconnected(&self, header: &BlockHeader, _height: u32) {
- ChannelManager::block_disconnected(self, header);
+ fn block_disconnected(&self, header: &BlockHeader, height: u32) {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ let new_height = height - 1;
+ {
+ let mut best_block = self.best_block.write().unwrap();
+ assert_eq!(best_block.block_hash(), header.block_hash(),
+ "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
+ assert_eq!(best_block.height(), height,
+ "Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
+ *best_block = BestBlock::new(header.prev_blockhash, new_height)
+ }
+
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time));
}
}
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
- T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
- F::Target: FeeEstimator,
- L::Target: Logger,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<Signer, M, T, K, F, L>
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- /// Updates channel state based on transactions seen in a connected block.
- pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // during initialization prior to the chain_monitor being fully configured in some cases.
+ // See the docs for `ChannelManagerReadArgs` for more.
+
+ let block_hash = header.block_hash();
+ log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
+
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
+ }
+
+ fn best_block_updated(&self, header: &BlockHeader, height: u32) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
+
let block_hash = header.block_hash();
- log_trace!(self.logger, "Block {} at height {} connected", block_hash, height);
+ log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
+
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+
+ *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
+
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time));
+
+ macro_rules! max_time {
+ ($timestamp: expr) => {
+ loop {
+ // Update $timestamp to be the max of its current value and the block
+ // timestamp. This should keep us close to the current time without relying on
+ // having an explicit local time source.
+ // Just in case we end up in a race, we loop until we either successfully
+ // update $timestamp or decide we don't need to.
+ let old_serial = $timestamp.load(Ordering::Acquire);
+ if old_serial >= header.time as usize { break; }
+ if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
+ break;
+ }
+ }
+ }
+ }
+ max_time!(self.last_node_announcement_serial);
+ max_time!(self.highest_seen_timestamp);
+ let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
+ payment_secrets.retain(|_, inbound_payment| {
+ inbound_payment.expiry_time > header.time as u64
+ });
+ }
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ fn get_relevant_txids(&self) -> Vec<Txid> {
+ let channel_state = self.channel_state.lock().unwrap();
+ let mut res = Vec::with_capacity(channel_state.short_to_id.len());
+ for chan in channel_state.by_id.values() {
+ if let Some(funding_txo) = chan.get_funding_txo() {
+ res.push(funding_txo.txid);
+ }
+ }
+ res
+ }
- assert_eq!(*self.last_block_hash.read().unwrap(), header.prev_blockhash,
- "Blocks must be connected in chain-order - the connected header must build on the last connected header");
- assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
- "Blocks must be connected in chain-order - the connected header must build on the last connected header");
- self.latest_block_height.store(height as usize, Ordering::Release);
- *self.last_block_hash.write().unwrap() = block_hash;
+ fn transaction_unconfirmed(&self, txid: &Txid) {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ self.do_chain_event(None, |channel| {
+ if let Some(funding_txo) = channel.get_funding_txo() {
+ if funding_txo.txid == *txid {
+ channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new()))
+ } else { Ok((None, Vec::new())) }
+ } else { Ok((None, Vec::new())) }
+ });
+ }
+}
+
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+{
+ /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
+ /// un/confirmed, etc) on each channel, processing any errors or outbound messages the
+ /// function generates.
+ fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
+ (&self, height_opt: Option<u32>, f: FN) {
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // during initialization prior to the chain_monitor being fully configured in some cases.
+ // See the docs for `ChannelManagerReadArgs` for more.
let mut failed_channels = Vec::new();
let mut timed_out_htlcs = Vec::new();
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, channel| {
- let res = channel.block_connected(header, txdata, height);
+ let res = f(channel);
if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
}
} else if let Err(e) = res {
+ if let Some(short_id) = channel.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ // It looks like our counterparty went on-chain or the funding transaction was
+ // reorged out of the main chain. Close the channel.
+ failed_channels.push(channel.force_shutdown(true));
+ if let Ok(update) = self.get_channel_update(&channel) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
});
return false;
}
- if let Some(funding_txo) = channel.get_funding_txo() {
- for &(_, tx) in txdata.iter() {
- for inp in tx.input.iter() {
- if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
- log_trace!(self.logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
- if let Some(short_id) = channel.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- // It looks like our counterparty went on-chain. Close the channel.
- failed_channels.push(channel.force_shutdown(true));
- if let Ok(update) = self.get_channel_update(&channel) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- return false;
- }
- }
- }
- }
true
});
- channel_state.claimable_htlcs.retain(|&(ref payment_hash, _), htlcs| {
- htlcs.retain(|htlc| {
- // If height is approaching the number of blocks we think it takes us to get
- // our commitment transaction confirmed before the HTLC expires, plus the
- // number of blocks we generally consider it to take to do a commitment update,
- // just give up on it and fail the HTLC.
- if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
- timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
- failure_code: 0x4000 | 15,
- data: htlc_msat_height_data
- }));
- false
- } else { true }
+ if let Some(height) = height_opt {
+ channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+ htlcs.retain(|htlc| {
+ // If height is approaching the number of blocks we think it takes us to get
+ // our commitment transaction confirmed before the HTLC expires, plus the
+ // number of blocks we generally consider it to take to do a commitment update,
+ // just give up on it and fail the HTLC.
+ if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+ timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+ failure_code: 0x4000 | 15,
+ data: htlc_msat_height_data
+ }));
+ false
+ } else { true }
+ });
+ !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
});
- !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
- });
+ }
}
self.handle_init_event_channel_failures(failed_channels);
for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
}
-
- loop {
- // Update last_node_announcement_serial to be the max of its current value and the
- // block timestamp. This should keep us close to the current time without relying on
- // having an explicit local time source.
- // Just in case we end up in a race, we loop until we either successfully update
- // last_node_announcement_serial or decide we don't need to.
- let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire);
- if old_serial >= header.time as usize { break; }
- if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
- break;
- }
- }
- }
-
- /// Updates channel state based on a disconnected block.
- ///
- /// If necessary, the channel may be force-closed without letting the counterparty participate
- /// in the shutdown.
- pub fn block_disconnected(&self, header: &BlockHeader) {
- // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
- // during initialization prior to the chain_monitor being fully configured in some cases.
- // See the docs for `ChannelManagerReadArgs` for more.
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-
- assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
- "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
- self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
- *self.last_block_hash.write().unwrap() = header.prev_blockhash;
-
- let mut failed_channels = Vec::new();
- {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- channel_state.by_id.retain(|_, v| {
- if v.block_disconnected(header) {
- if let Some(short_id) = v.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- failed_channels.push(v.force_shutdown(true));
- if let Ok(update) = self.get_channel_update(&v) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- false
- } else {
- true
- }
- });
- }
-
- self.handle_init_event_channel_failures(failed_channels);
}
/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
}
}
-impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
ChannelMessageHandler for ChannelManager<Signer, M, T, K, F, L>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
L::Target: Logger,
{
fn handle_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, their_features, msg), *counterparty_node_id);
}
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, their_features, msg), *counterparty_node_id);
}
fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, their_features, msg), *counterparty_node_id);
}
fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_closing_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_fulfill_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_commitment_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_revoke_and_ack(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
}
fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut failed_channels = Vec::new();
- let mut failed_payments = Vec::new();
let mut no_channels_remain = true;
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
channel_state.by_id.retain(|_, chan| {
if chan.get_counterparty_node_id() == *counterparty_node_id {
- // Note that currently on channel reestablish we assert that there are no
- // holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
- // on peer disconnect here, there will need to be corresponding changes in
- // reestablish logic.
- let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- chan.to_disabled_marked();
- if !failed_adds.is_empty() {
- let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
- failed_payments.push((chan_update, failed_adds));
- }
+ chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
if chan.is_shutdown() {
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
for failure in failed_channels.drain(..) {
self.finish_force_close_channel(failure);
}
- for (chan_update, mut htlc_sources) in failed_payments {
- for (htlc_source, payment_hash) in htlc_sources.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
- }
- }
}
fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
{
let mut peer_state_lock = self.per_peer_state.write().unwrap();
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
if msg.channel_id == [0; 32] {
for chan in self.list_channels() {
loop {
let &(ref mtx, ref cvar) = &self.persistence_lock;
let mut guard = mtx.lock().unwrap();
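+ // If a notification arrived while no one was waiting, consume it here and
+ // return immediately instead of blocking on the condvar.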
+ if *guard {
+ *guard = false;
+ return;
+ }
guard = cvar.wait(guard).unwrap();
let result = *guard;
if result {
loop {
let &(ref mtx, ref cvar) = &self.persistence_lock;
let mut guard = mtx.lock().unwrap();
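+ // As above, consume any already-pending notification before waiting so a
+ // notify that raced ahead of this call is not lost.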
+ if *guard {
+ *guard = false;
+ return true;
+ }
guard = cvar.wait_timeout(guard, max_wait).unwrap().0;
// Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
// desired wait time has actually passed, and if not then restart the loop with a reduced wait
},
&PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => {
1u8.write(writer)?;
- payment_data.write(writer)?;
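+ // payment_data is no longer optional, so write its two fields inline; the
+ // read side below rebuilds the FinalOnionHopData from them.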
+ payment_data.payment_secret.write(writer)?;
+ payment_data.total_msat.write(writer)?;
incoming_cltv_expiry.write(writer)?;
},
}
short_channel_id: Readable::read(reader)?,
},
1u8 => PendingHTLCRouting::Receive {
- payment_data: Readable::read(reader)?,
+ payment_data: msgs::FinalOnionHopData {
+ payment_secret: Readable::read(reader)?,
+ total_msat: Readable::read(reader)?,
+ },
incoming_cltv_expiry: Readable::read(reader)?,
},
_ => return Err(DecodeError::InvalidValue),
incoming_packet_shared_secret
});
-impl_writeable!(ClaimableHTLC, 0, {
- prev_hop,
- value,
- payment_data,
- cltv_expiry
-});
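+// Replaced impl_writeable! with manual impls so payment_data's fields can be
+// flattened inline, matching the field-by-field encoding used above.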
+impl Writeable for ClaimableHTLC {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ self.prev_hop.write(writer)?;
+ self.value.write(writer)?;
+ self.payment_data.payment_secret.write(writer)?;
+ self.payment_data.total_msat.write(writer)?;
+ self.cltv_expiry.write(writer)
+ }
+}
+
+impl Readable for ClaimableHTLC {
+ fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ Ok(ClaimableHTLC {
+ prev_hop: Readable::read(reader)?,
+ value: Readable::read(reader)?,
+ payment_data: msgs::FinalOnionHopData {
+ payment_secret: Readable::read(reader)?,
+ total_msat: Readable::read(reader)?,
+ },
+ cltv_expiry: Readable::read(reader)?,
+ })
+ }
+}
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
}
}
+impl_writeable!(PendingInboundPayment, 0, {
+ payment_secret,
+ expiry_time,
+ user_payment_id,
+ payment_preimage,
+ min_value_msat
+});
+
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
self.genesis_hash.write(writer)?;
- (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
- self.last_block_hash.read().unwrap().write(writer)?;
+ {
+ let best_block = self.best_block.read().unwrap();
+ best_block.height().write(writer)?;
+ best_block.block_hash().write(writer)?;
+ }
let channel_state = self.channel_state.lock().unwrap();
let mut unfunded_channels = 0;
}
(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
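+ // Persist highest_seen_timestamp alongside last_node_announcement_serial; both
+ // counters only ever move forward and must survive restarts.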
+ (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
+
+ let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+ (pending_inbound_payments.len() as u64).write(writer)?;
+ for (hash, pending_payment) in pending_inbound_payments.iter() {
+ hash.write(writer)?;
+ pending_payment.write(writer)?;
+ }
+
+ let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+ (pending_outbound_payments.len() as u64).write(writer)?;
+ for session_priv in pending_outbound_payments.iter() {
+ session_priv.write(writer)?;
+ }
Ok(())
}
}
let genesis_hash: BlockHash = Readable::read(reader)?;
- let latest_block_height: u32 = Readable::read(reader)?;
- let last_block_hash: BlockHash = Readable::read(reader)?;
+ let best_block_height: u32 = Readable::read(reader)?;
+ let best_block_hash: BlockHash = Readable::read(reader)?;
let mut failed_htlcs = Vec::new();
}
let last_node_announcement_serial: u32 = Readable::read(reader)?;
+ let highest_seen_timestamp: u32 = Readable::read(reader)?;
+
+ let pending_inbound_payment_count: u64 = Readable::read(reader)?;
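+ // Cap the up-front allocation so a corrupt or malicious length prefix cannot
+ // force a huge reservation; the map still grows as entries are actually read.
+ // The same guard is applied to the outbound set below.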
+ let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
+ for _ in 0..pending_inbound_payment_count {
+ if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
+ let pending_outbound_payments_count: u64 = Readable::read(reader)?;
+ let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32));
+ for _ in 0..pending_outbound_payments_count {
+ if !pending_outbound_payments.insert(Readable::read(reader)?) {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
chain_monitor: args.chain_monitor,
tx_broadcaster: args.tx_broadcaster,
- latest_block_height: AtomicUsize::new(latest_block_height as usize),
- last_block_hash: RwLock::new(last_block_hash),
- secp_ctx,
+ best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
channel_state: Mutex::new(ChannelHolder {
by_id,
claimable_htlcs,
pending_msg_events: Vec::new(),
}),
+ pending_inbound_payments: Mutex::new(pending_inbound_payments),
+ pending_outbound_payments: Mutex::new(pending_outbound_payments),
+
our_network_key: args.keys_manager.get_node_secret(),
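+ // Cache the node pubkey at load time so later get_our_node_id() calls need not
+ // rederive it from the secret key.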
+ our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
+ secp_ctx,
last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
+ highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
per_peer_state: RwLock::new(per_peer_state),
//TODO: Broadcast channel update for closed channels, but only after we've made a
//connection or two.
- Ok((last_block_hash.clone(), channel_manager))
+ Ok((best_block_hash.clone(), channel_manager))
}
}
mod tests {
use ln::channelmanager::PersistenceNotifier;
use std::sync::Arc;
- use std::sync::atomic::{AtomicBool, Ordering};
+ use core::sync::atomic::{AtomicBool, Ordering};
use std::thread;
- use std::time::Duration;
+ use core::time::Duration;
#[test]
fn test_wait_timeout() {
}
}
}
+
+#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
+pub mod bench {
+ use chain::Listen;
+ use chain::chainmonitor::ChainMonitor;
+ use chain::channelmonitor::Persist;
+ use chain::keysinterface::{KeysManager, InMemorySigner};
+ use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
+ use ln::features::{InitFeatures, InvoiceFeatures};
+ use ln::functional_test_utils::*;
+ use ln::msgs::ChannelMessageHandler;
+ use routing::network_graph::NetworkGraph;
+ use routing::router::get_route;
+ use util::test_utils;
+ use util::config::UserConfig;
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+
+ use bitcoin::hashes::Hash;
+ use bitcoin::hashes::sha256::Hash as Sha256;
+ use bitcoin::{Block, BlockHeader, Transaction, TxOut};
+
+ use std::sync::Mutex;
+
+ use test::Bencher;
+
+ struct NodeHolder<'a, P: Persist<InMemorySigner>> {
+ node: &'a ChannelManager<InMemorySigner,
+ &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
+ &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
+ &'a test_utils::TestLogger, &'a P>,
+ &'a test_utils::TestBroadcaster, &'a KeysManager,
+ &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>
+ }
+
+ #[cfg(test)]
+ #[bench]
+ fn bench_sends(bench: &mut Bencher) {
+ bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+ }
+
+ pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+ // Do a simple benchmark of sending a payment back and forth between two nodes.
+ // Note that this is unrealistic as each payment send will require at least two fsync
+ // calls per node.
+ let network = bitcoin::Network::Testnet;
+ let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
+
+ let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
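+ // 253 sat/kW is a minimal feerate for the benchmark, just above the 1 sat/vbyte
+ // relay floor.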
+ let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+
+ let mut config: UserConfig = Default::default();
+ config.own_channel_config.minimum_depth = 1;
+
+ let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
+ let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
+ let seed_a = [1u8; 32];
+ let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
+ let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
+ network,
+ best_block: BestBlock::from_genesis(network),
+ });
+ let node_a_holder = NodeHolder { node: &node_a };
+
+ let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
+ let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
+ let seed_b = [2u8; 32];
+ let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
+ let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
+ network,
+ best_block: BestBlock::from_genesis(network),
+ });
+ let node_b_holder = NodeHolder { node: &node_b };
+
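+ // Open an 8M-sat channel from node_a, pushing 100M msat to node_b up front.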
+ node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
+ node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
+ node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
+
+ let tx;
+ if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
+ tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ value: 8_000_000, script_pubkey: output_script,
+ }]};
+ node_a.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+ } else { panic!(); }
+
+ node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
+ node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
+
+ assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
+
+ let block = Block {
+ header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ txdata: vec![tx],
+ };
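+ // With minimum_depth set to 1 above, a single confirmation suffices to lock
+ // the channel in on both nodes.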
+ Listen::block_connected(&node_a, &block, 1);
+ Listen::block_connected(&node_b, &block, 1);
+
+ node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
+ node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+
+ let dummy_graph = NetworkGraph::new(genesis_hash);
+
+ let mut payment_count: u64 = 0;
+ macro_rules! send_payment {
+ ($node_a: expr, $node_b: expr) => {
+ let usable_channels = $node_a.list_usable_channels();
+ let route = get_route(&$node_a.get_our_node_id(), &dummy_graph, &$node_b.get_our_node_id(), Some(InvoiceFeatures::known()),
+ Some(&usable_channels.iter().collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a).unwrap();
+
+ let mut payment_preimage = PaymentPreimage([0; 32]);
+ payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
+ payment_count += 1;
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+ let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap();
+
+ $node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
+ $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
+ $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
+ let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
+ $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
+ $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
+ $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
+
+ expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
+ expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
+ assert!($node_b.claim_funds(payment_preimage));
+
+ match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
+ MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+ assert_eq!(node_id, $node_a.get_our_node_id());
+ $node_a.handle_update_fulfill_htlc(&$node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &updates.commitment_signed);
+ },
+ _ => panic!("Failed to generate claim event"),
+ }
+
+ let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
+ $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
+ $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
+ $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
+
+ expect_payment_sent!(NodeHolder { node: &$node_a }, payment_preimage);
+ }
+ }
+
+ bench.iter(|| {
+ send_payment!(node_a, node_b);
+ send_payment!(node_b, node_a);
+ });
+ }
+}