X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=6ab97c2420e0fd1bab7da472de68ff4d27ff684a;hb=2c77c9b021bdbcf57a2e40097e73722f1770e5e0;hp=47ea356442398d461e68f4656b81e0d35ffe1b79;hpb=ba600db7931d8f87a738ef077db3583554af60cb;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 47ea3564..6ab97c24 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -36,16 +36,14 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1; use chain; -use chain::Confirm; -use chain::Watch; -use chain::chaininterface::{BroadcasterInterface, FeeEstimator}; +use chain::{Confirm, Watch, BestBlock}; +use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; use chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use ln::{PaymentHash, PaymentPreimage, PaymentSecret}; -pub use ln::channel::CounterpartyForwardingInfo; -use ln::channel::{Channel, ChannelError, ChannelUpdateStatus}; +use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch}; use ln::features::{InitFeatures, NodeFeatures}; use routing::router::{Route, RouteHop}; use ln::msgs; @@ -54,24 +52,24 @@ use ln::onion_utils; use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField}; use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner}; use util::config::UserConfig; -use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; +use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use util::{byte_utils, events}; use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; use util::chacha20::{ChaCha20, ChaChaReader}; -use util::logger::Logger; +use util::logger::{Logger, Level}; use util::errors::APIError; +use io; use prelude::*; use core::{cmp, mem}; use core::cell::RefCell; -use std::io::{Cursor, Read}; -use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard}; +use io::{Cursor, Read}; +use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; #[cfg(any(test, feature = "allow_wallclock_use"))] use std::time::Instant; use core::ops::Deref; -use bitcoin::hashes::hex::ToHex; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -100,6 +98,10 @@ enum PendingHTLCRouting { payment_data: msgs::FinalOnionHopData, incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed }, + ReceiveKeysend { + payment_preimage: PaymentPreimage, + incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed + }, } #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug @@ -154,16 +156,38 @@ pub(crate) struct HTLCPreviousHopData { outpoint: OutPoint, } -struct ClaimableHTLC { - prev_hop: HTLCPreviousHopData, - value: u64, +enum OnionPayload { /// Contains a total_msat (which may differ from value if this is a Multi-Path 
Payment) and a /// payment_secret which prevents path-probing attacks and can associate different HTLCs which /// are part of the same payment. - payment_data: msgs::FinalOnionHopData, + Invoice(msgs::FinalOnionHopData), + /// Contains the payer-provided preimage. + Spontaneous(PaymentPreimage), +} + +struct ClaimableHTLC { + prev_hop: HTLCPreviousHopData, cltv_expiry: u32, + value: u64, + onion_payload: OnionPayload, +} + +/// A payment identifier used to correlate an MPP payment's per-path HTLC sources internally. +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub(crate) struct MppId(pub [u8; 32]); + +impl Writeable for MppId { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + self.0.write(w) + } } +impl Readable for MppId { + fn read(r: &mut R) -> Result { + let buf: [u8; 32] = Readable::read(r)?; + Ok(MppId(buf)) + } +} /// Tracks the inbound corresponding to an outbound HTLC #[derive(Clone, PartialEq)] pub(crate) enum HTLCSource { @@ -174,6 +198,7 @@ pub(crate) enum HTLCSource { /// Technically we can recalculate this from the route, but we cache it here to avoid /// doing a double-pass on route when we get a failure back first_hop_htlc_msat: u64, + mpp_id: MppId, }, } #[cfg(test)] @@ -183,6 +208,7 @@ impl HTLCSource { path: Vec::new(), session_priv: SecretKey::from_slice(&[1; 32]).unwrap(), first_hop_htlc_msat: 0, + mpp_id: MppId([2; 32]), } } } @@ -198,6 +224,14 @@ pub(super) enum HTLCFailReason { } } +/// Return value for claim_funds_from_hop +enum ClaimFundsFromHop { + PrevHopForceClosed, + MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option), + Success(u64), + DuplicateClaim, +} + type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>); /// Error type returned across the channel_state mutex boundary. 
When an Err is generated for a @@ -208,6 +242,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource struct MsgHandleErrInternal { err: msgs::LightningError, + chan_id: Option<[u8; 32]>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { @@ -223,6 +258,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: None, shutdown_finish: None, } } @@ -233,12 +269,13 @@ impl MsgHandleErrInternal { err, action: msgs::ErrorAction::IgnoreError, }, + chan_id: None, shutdown_finish: None, } } #[inline] fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, shutdown_finish: None } + Self { err, chan_id: None, shutdown_finish: None } } #[inline] fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option) -> Self { @@ -252,6 +289,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: Some(channel_id), shutdown_finish: Some((shutdown_res, channel_update)), } } @@ -259,6 +297,10 @@ impl MsgHandleErrInternal { fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { + ChannelError::Warn(msg) => LightningError { + err: msg, + action: msgs::ErrorAction::IgnoreError, + }, ChannelError::Ignore(msg) => LightningError { err: msg, action: msgs::ErrorAction::IgnoreError, @@ -282,6 +324,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: None, shutdown_finish: None, } } @@ -452,8 +495,11 @@ pub struct ChannelManager>, + pending_outbound_payments: Mutex>>, our_network_key: SecretKey, our_network_pubkey: PublicKey, @@ -473,6 +519,8 @@ pub struct ChannelManager>>, pending_events: Mutex>, @@ -508,34 +556,6 @@ pub struct ChainParameters { pub best_block: BestBlock, } -/// The best known block as identified by its hash and height. -#[derive(Clone, Copy, PartialEq)] -pub struct BestBlock { - block_hash: BlockHash, - height: u32, -} - -impl BestBlock { - /// Returns the best block from the genesis of the given network. - pub fn from_genesis(network: Network) -> Self { - BestBlock { - block_hash: genesis_block(network).header.block_hash(), - height: 0, - } - } - - /// Returns the best block as identified by the given block hash and height. - pub fn new(block_hash: BlockHash, height: u32) -> Self { - BestBlock { block_hash, height } - } - - /// Returns the best block hash. - pub fn block_hash(&self) -> BlockHash { self.block_hash } - - /// Returns the best block height. - pub fn height(&self) -> u32 { self.height } -} - #[derive(Copy, Clone, PartialEq)] enum NotifyOption { DoPersist, @@ -631,6 +651,42 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRA #[allow(dead_code)] const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER; +/// Information needed for constructing an invoice route hint for this channel. +#[derive(Clone, Debug, PartialEq)] +pub struct CounterpartyForwardingInfo { + /// Base routing fee in millisatoshis. + pub fee_base_msat: u32, + /// Amount in millionths of a satoshi the channel will charge per transferred satoshi. + pub fee_proportional_millionths: u32, + /// The minimum difference in cltv_expiry between an ingoing HTLC and its outgoing counterpart, + /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s + /// `cltv_expiry_delta` for more details. + pub cltv_expiry_delta: u16, +} + +/// Channel parameters which apply to our counterparty. 
These are split out from [`ChannelDetails`] +/// to better separate parameters. +#[derive(Clone, Debug, PartialEq)] +pub struct ChannelCounterparty { + /// The node_id of our counterparty + pub node_id: PublicKey, + /// The Features the channel counterparty provided upon last connection. + /// Useful for routing as it is the most up-to-date copy of the counterparty's features and + /// many routing-relevant features are present in the init context. + pub features: InitFeatures, + /// The value, in satoshis, that must always be held in the channel for our counterparty. This + /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by + /// claiming at least this value on chain. + /// + /// This value is not included in [`inbound_capacity_msat`] as it can never be spent. + /// + /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat + pub unspendable_punishment_reserve: u64, + /// Information on the fees and requirements that the counterparty requires when forwarding + /// payments to us through this channel. + pub forwarding_info: Option, +} + /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels #[derive(Clone, Debug, PartialEq)] pub struct ChannelDetails { @@ -639,6 +695,8 @@ pub struct ChannelDetails { /// Note that this means this value is *not* persistent - it can change once during the /// lifetime of the channel. pub channel_id: [u8; 32], + /// Parameters which apply to our counterparty. See individual fields for more information. + pub counterparty: ChannelCounterparty, /// The Channel's funding transaction output, if we've negotiated the funding transaction with /// our counterparty already. /// @@ -648,33 +706,68 @@ pub struct ChannelDetails { /// The position of the funding transaction in the chain. None if the funding transaction has /// not yet been confirmed and the channel fully opened. pub short_channel_id: Option, - /// The node_id of our counterparty - pub remote_network_id: PublicKey, - /// The Features the channel counterparty provided upon last connection. - /// Useful for routing as it is the most up-to-date copy of the counterparty's features and - /// many routing-relevant features are present in the init context. - pub counterparty_features: InitFeatures, /// The value, in satoshis, of this channel as appears in the funding output pub channel_value_satoshis: u64, + /// The value, in satoshis, that must always be held in the channel for us. This value ensures + /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least + /// this value on chain. + /// + /// This value is not included in [`outbound_capacity_msat`] as it can never be spent. + /// + /// This value will be `None` for outbound channels until the counterparty accepts the channel. + /// + /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat + pub unspendable_punishment_reserve: Option, /// The user_id passed in to create_channel, or 0 if the channel was inbound. pub user_id: u64, /// The available outbound capacity for sending HTLCs to the remote peer. This does not include /// any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not /// available for inclusion in new outbound HTLCs). This further does not include any pending /// outgoing HTLCs which are awaiting some other resolution to be sent. + /// + /// This value is not exact. 
Due to various in-flight changes, feerate changes, and our + /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we + /// should be able to spend nearly this amount. pub outbound_capacity_msat: u64, /// The available inbound capacity for the remote peer to send HTLCs to us. This does not /// include any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not /// available for inclusion in new inbound HTLCs). /// Note that there are some corner cases not fully handled here, so the actual available /// inbound capacity may be slightly higher than this. + /// + /// This value is not exact. Due to various in-flight changes, feerate changes, and our + /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable. + /// However, our counterparty should be able to spend nearly this amount. pub inbound_capacity_msat: u64, + /// The number of required confirmations on the funding transaction before the funding will be + /// considered "locked". This number is selected by the channel fundee (i.e. us if + /// [`is_outbound`] is *not* set), and can be selected for inbound channels with + /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with + /// [`ChannelHandshakeLimits::max_minimum_depth`]. + /// + /// This value will be `None` for outbound channels until the counterparty accepts the channel. + /// + /// [`is_outbound`]: ChannelDetails::is_outbound + /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth + /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth + pub confirmations_required: Option, + /// The number of blocks (after our commitment transaction confirms) that we will need to wait + /// until we can claim our funds after we force-close the channel. During this time our + /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty + /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any + /// time to claim our non-HTLC-encumbered funds. + /// + /// This value will be `None` for outbound channels until the counterparty accepts the channel. + pub force_close_spend_delay: Option, /// True if the channel was initiated (and thus funded) by us. pub is_outbound: bool, /// True if the channel is confirmed, funding_locked messages have been exchanged, and the /// channel is not currently being shut down. `funding_locked` message exchange implies the /// required confirmation count has been reached (and we were connected to the peer at some - /// point after the funding transaction received enough confirmations). + /// point after the funding transaction received enough confirmations). The required + /// confirmation count is provided in [`confirmations_required`]. + /// + /// [`confirmations_required`]: ChannelDetails::confirmations_required pub is_funding_locked: bool, /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b) /// the peer is connected, and (c) the channel is not currently negotiating a shutdown. @@ -683,9 +776,6 @@ pub struct ChannelDetails { pub is_usable: bool, /// True if this channel is (or will be) publicly-announced. pub is_public: bool, - /// Information on the fees and requirements that the counterparty requires when forwarding - /// payments to us through this channel. 
- pub counterparty_forwarding_info: Option, } /// If a payment fails to send, it can be in one of several states. This enum is returned as the @@ -728,12 +818,13 @@ macro_rules! handle_error { ($self: ident, $internal: expr, $counterparty_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish }) => { + Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => { #[cfg(debug_assertions)] { // In testing, ensure there are no deadlocks where the lock is already held upon // entering the macro. assert!($self.channel_state.try_lock().is_ok()); + assert!($self.pending_events.try_lock().is_ok()); } let mut msg_events = Vec::with_capacity(2); @@ -745,6 +836,9 @@ macro_rules! handle_error { msg: update }); } + if let Some(channel_id) = chan_id { + $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } }); + } } log_error!($self.logger, "{}", err.err); @@ -771,6 +865,11 @@ macro_rules! handle_error { macro_rules! convert_chan_err { ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => { match $err { + ChannelError::Warn(msg) => { + //TODO: Once warning messages are merged, we should send a `warning` message to our + //peer here. + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) + }, ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, @@ -824,6 +923,18 @@ macro_rules! try_chan_entry { } } +macro_rules! remove_channel { + ($channel_state: expr, $entry: expr) => { + { + let channel = $entry.remove_entry().1; + if let Some(short_id) = channel.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + channel + } + } +} + macro_rules! handle_monitor_err { ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new()) @@ -1069,7 +1180,7 @@ impl ChannelMana pending_msg_events: Vec::new(), }), pending_inbound_payments: Mutex::new(HashMap::new()), - pending_outbound_payments: Mutex::new(HashSet::new()), + pending_outbound_payments: Mutex::new(HashMap::new()), our_network_key: keys_manager.get_node_secret(), our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), @@ -1109,13 +1220,27 @@ impl ChannelMana /// /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000. + /// + /// Note that we do not check if you are currently connected to the given peer. If no + /// connection is available, the outbound `open_channel` message may fail to send, resulting in + /// the channel eventually being silently forgotten. pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option) -> Result<(), APIError> { if channel_value_satoshis < 1000 { return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. 
It was {}", channel_value_satoshis) }); } - let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?; + let channel = { + let per_peer_state = self.per_peer_state.read().unwrap(); + match per_peer_state.get(&their_network_key) { + Some(peer_state) => { + let peer_state = peer_state.lock().unwrap(); + let their_features = &peer_state.latest_features; + let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; + Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)? + }, + None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), + } + }; let res = channel.get_open_channel(self.genesis_hash.clone()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -1147,28 +1272,36 @@ impl ChannelMana res.reserve(channel_state.by_id.len()); for (channel_id, channel) in channel_state.by_id.iter().filter(f) { let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat(); + let (to_remote_reserve_satoshis, to_self_reserve_satoshis) = + channel.get_holder_counterparty_selected_channel_reserve_satoshis(); res.push(ChannelDetails { channel_id: (*channel_id).clone(), + counterparty: ChannelCounterparty { + node_id: channel.get_counterparty_node_id(), + features: InitFeatures::empty(), + unspendable_punishment_reserve: to_remote_reserve_satoshis, + forwarding_info: channel.counterparty_forwarding_info(), + }, funding_txo: channel.get_funding_txo(), short_channel_id: channel.get_short_channel_id(), - remote_network_id: channel.get_counterparty_node_id(), - counterparty_features: InitFeatures::empty(), channel_value_satoshis: channel.get_value_satoshis(), + unspendable_punishment_reserve: to_self_reserve_satoshis, inbound_capacity_msat, outbound_capacity_msat, user_id: channel.get_user_id(), + confirmations_required: channel.minimum_depth(), + force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), is_outbound: channel.is_outbound(), is_funding_locked: channel.is_usable(), is_usable: channel.is_live(), is_public: channel.should_announce(), - counterparty_forwarding_info: channel.counterparty_forwarding_info(), }); } } let per_peer_state = self.per_peer_state.read().unwrap(); for chan in res.iter_mut() { - if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) { - chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone(); + if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) { + chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone(); } } res @@ -1193,51 +1326,117 @@ impl ChannelMana self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } - /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs - /// will be accepted on the given channel, and after additional timeout/the closing of all - /// pending HTLCs, the channel will be closed on chain. - /// - /// May generate a SendShutdown message event on success, which should be relayed. 
- pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { + fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let (mut failed_htlcs, chan_option) = { + let counterparty_node_id; + let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; + let result: Result<(), _> = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; match channel_state.by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?; + counterparty_node_id = chan_entry.get().get_counterparty_node_id(); + let per_peer_state = self.per_peer_state.read().unwrap(); + let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) { + Some(peer_state) => { + let peer_state = peer_state.lock().unwrap(); + let their_features = &peer_state.latest_features; + chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? + }, + None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), + }; + failed_htlcs = htlcs; + + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update { + if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { + let (result, is_permanent) = + handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key()); + if is_permanent { + remove_channel!(channel_state, chan_entry); + break result; + } + } + } + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: chan_entry.get().get_counterparty_node_id(), + node_id: counterparty_node_id, msg: shutdown_msg }); + if chan_entry.get().is_shutdown() { - if let Some(short_id) = chan_entry.get().get_short_channel_id() { - channel_state.short_to_id.remove(&short_id); + let channel = remove_channel!(channel_state, chan_entry); + if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: channel_update + }); } - (failed_htlcs, Some(chan_entry.remove_entry().1)) - } else { (failed_htlcs, None) } + if let Ok(mut pending_events_lock) = self.pending_events.lock() { + pending_events_lock.push(events::Event::ChannelClosed { + channel_id: *channel_id, + reason: ClosureReason::HolderForceClosed + }); + } + } + break Ok(()); }, hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}) } }; + for htlc_source in failed_htlcs.drain(..) 
{ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } - let chan_update = if let Some(chan) = chan_option { - self.get_channel_update_for_broadcast(&chan).ok() - } else { None }; - - if let Some(update) = chan_update { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } + let _ = handle_error!(self, result, counterparty_node_id); Ok(()) } + /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs + /// will be accepted on the given channel, and after additional timeout/the closing of all + /// pending HTLCs, the channel will be closed on chain. + /// + /// * If we are the channel initiator, we will pay between our [`Background`] and + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee + /// estimate. + /// * If our counterparty is the channel initiator, we will require a channel closing + /// transaction feerate of at least our [`Background`] feerate or the feerate which + /// would appear on a force-closure transaction, whichever is lower. We will allow our + /// counterparty to pay as much fee as they'd like, however. + /// + /// May generate a SendShutdown message event on success, which should be relayed. + /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { + self.close_channel_internal(channel_id, None) + } + + /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs + /// will be accepted on the given channel, and after additional timeout/the closing of all + /// pending HTLCs, the channel will be closed on chain. + /// + /// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated + /// the channel being closed or not: + /// * If we are the channel initiator, we will pay at least this feerate on the closing + /// transaction. The upper-bound is set by + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee + /// estimate (or `target_feerate_sat_per_1000_weight`, if it is greater). + /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure + /// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which + /// will appear on a force-closure transaction, whichever is lower). + /// + /// May generate a SendShutdown message event on success, which should be relayed. 
+ /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> { + self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight)) + } + #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (monitor_update_option, mut failed_htlcs) = shutdown_res; @@ -1254,7 +1453,9 @@ impl ChannelMana } } - fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result { + /// `peer_node_id` should be set when we receive a message from a peer, but not set when the user closes, which will be re-exposed as the `ChannelClosed` + /// reason. + fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result { let mut chan = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -1267,6 +1468,14 @@ impl ChannelMana if let Some(short_id) = chan.get().get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } + let mut pending_events_lock = self.pending_events.lock().unwrap(); + if peer_node_id.is_some() { + if let Some(peer_msg) = peer_msg { + pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } }); + } + } else { + pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed }); + } chan.remove_entry().1 } else { return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); @@ -1288,7 +1497,7 @@ impl ChannelMana /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager. pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - match self.force_close_channel_with_peer(channel_id, None) { + match self.force_close_channel_with_peer(channel_id, None, None) { Ok(counterparty_node_id) => { self.channel_state.lock().unwrap().pending_msg_events.push( events::MessageSendEvent::HandleError { @@ -1374,8 +1583,8 @@ impl ChannelMana let mut chacha = ChaCha20::new(&rho, &[0u8; 8]); let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) }; - let (next_hop_data, next_hop_hmac) = { - match msgs::OnionHopData::read(&mut chacha_stream) { + let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = { + match ::read(&mut chacha_stream) { Err(err) => { let error_code = match err { msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte @@ -1397,121 +1606,141 @@ impl ChannelMana }; let pending_forward_info = if next_hop_hmac == [0; 32] { - #[cfg(test)] - { - // In tests, make sure that the initial onion pcket data is, at least, non-0. - // We could do some fancy randomness test here, but, ehh, whatever. 
- // This checks for the issue where you can calculate the path length given the - // onion data as all the path entries that the originator sent will be here - // as-is (and were originally 0s). - // Of course reverse path calculation is still pretty easy given naive routing - // algorithms, but this fixes the most-obvious case. - let mut next_bytes = [0; 32]; - chacha_stream.read_exact(&mut next_bytes).unwrap(); - assert_ne!(next_bytes[..], [0; 32][..]); - chacha_stream.read_exact(&mut next_bytes).unwrap(); - assert_ne!(next_bytes[..], [0; 32][..]); - } - - // OUR PAYMENT! - // final_expiry_too_soon - // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least - // HTLC_FAIL_BACK_BUFFER blocks to go. - // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward - // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational). - if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { - return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); - } - // final_incorrect_htlc_amount - if next_hop_data.amt_to_forward > msg.amount_msat { - return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); - } - // final_incorrect_cltv_expiry - if next_hop_data.outgoing_cltv_value != msg.cltv_expiry { - return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); - } - - let payment_data = match next_hop_data.format { - msgs::OnionHopDataFormat::Legacy { .. } => None, - msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]), - msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data, - }; + #[cfg(test)] + { + // In tests, make sure that the initial onion pcket data is, at least, non-0. + // We could do some fancy randomness test here, but, ehh, whatever. + // This checks for the issue where you can calculate the path length given the + // onion data as all the path entries that the originator sent will be here + // as-is (and were originally 0s). + // Of course reverse path calculation is still pretty easy given naive routing + // algorithms, but this fixes the most-obvious case. + let mut next_bytes = [0; 32]; + chacha_stream.read_exact(&mut next_bytes).unwrap(); + assert_ne!(next_bytes[..], [0; 32][..]); + chacha_stream.read_exact(&mut next_bytes).unwrap(); + assert_ne!(next_bytes[..], [0; 32][..]); + } - if payment_data.is_none() { - return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]); - } + // OUR PAYMENT! + // final_expiry_too_soon + // We have to have some headroom to broadcast on chain if we have the preimage, so make sure + // we have at least HTLC_FAIL_BACK_BUFFER blocks to go. + // Also, ensure that, in the case of an unknown preimage for the received payment hash, our + // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a + // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). 
+ if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { + return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); + } + // final_incorrect_htlc_amount + if next_hop_data.amt_to_forward > msg.amount_msat { + return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); + } + // final_incorrect_cltv_expiry + if next_hop_data.outgoing_cltv_value != msg.cltv_expiry { + return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); + } - // Note that we could obviously respond immediately with an update_fulfill_htlc - // message, however that would leak that we are the recipient of this payment, so - // instead we stay symmetric with the forwarding case, only responding (after a - // delay) once they've send us a commitment_signed! + let routing = match next_hop_data.format { + msgs::OnionHopDataFormat::Legacy { .. } => return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]), + msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]), + msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => { + if payment_data.is_some() && keysend_preimage.is_some() { + return_err!("We don't support MPP keysend payments", 0x4000|22, &[0;0]); + } else if let Some(data) = payment_data { + PendingHTLCRouting::Receive { + payment_data: data, + incoming_cltv_expiry: msg.cltv_expiry, + } + } else if let Some(payment_preimage) = keysend_preimage { + // We need to check that the sender knows the keysend preimage before processing this + // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X + // could discover the final destination of X, by probing the adjacent nodes on the route + // with a keysend payment of identical payment hash to X and observing the processing + // time discrepancies due to a hash collision with X. + let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + if hashed_preimage != msg.payment_hash { + return_err!("Payment preimage didn't match payment hash", 0x4000|22, &[0;0]); + } - PendingHTLCStatus::Forward(PendingHTLCInfo { - routing: PendingHTLCRouting::Receive { - payment_data: payment_data.unwrap(), - incoming_cltv_expiry: msg.cltv_expiry, - }, - payment_hash: msg.payment_hash.clone(), - incoming_shared_secret: shared_secret, - amt_to_forward: next_hop_data.amt_to_forward, - outgoing_cltv_value: next_hop_data.outgoing_cltv_value, - }) - } else { - let mut new_packet_data = [0; 20*65]; - let read_pos = chacha_stream.read(&mut new_packet_data).unwrap(); - #[cfg(debug_assertions)] - { - // Check two things: - // a) that the behavior of our stream here will return Ok(0) even if the TLV - // read above emptied out our buffer and the unwrap() wont needlessly panic - // b) that we didn't somehow magically end up with extra data. - let mut t = [0; 1]; - debug_assert!(chacha_stream.read(&mut t).unwrap() == 0); - } - // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we - // fill the onion hop data we'll forward to our next-hop peer. 
- chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]); + PendingHTLCRouting::ReceiveKeysend { + payment_preimage, + incoming_cltv_expiry: msg.cltv_expiry, + } + } else { + return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]); + } + }, + }; - let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap(); + // Note that we could obviously respond immediately with an update_fulfill_htlc + // message, however that would leak that we are the recipient of this payment, so + // instead we stay symmetric with the forwarding case, only responding (after a + // delay) once they've send us a commitment_signed! + + PendingHTLCStatus::Forward(PendingHTLCInfo { + routing, + payment_hash: msg.payment_hash.clone(), + incoming_shared_secret: shared_secret, + amt_to_forward: next_hop_data.amt_to_forward, + outgoing_cltv_value: next_hop_data.outgoing_cltv_value, + }) + } else { + let mut new_packet_data = [0; 20*65]; + let read_pos = chacha_stream.read(&mut new_packet_data).unwrap(); + #[cfg(debug_assertions)] + { + // Check two things: + // a) that the behavior of our stream here will return Ok(0) even if the TLV + // read above emptied out our buffer and the unwrap() wont needlessly panic + // b) that we didn't somehow magically end up with extra data. + let mut t = [0; 1]; + debug_assert!(chacha_stream.read(&mut t).unwrap() == 0); + } + // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we + // fill the onion hop data we'll forward to our next-hop peer. + chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]); - let blinding_factor = { - let mut sha = Sha256::engine(); - sha.input(&new_pubkey.serialize()[..]); - sha.input(&shared_secret); - Sha256::from_engine(sha).into_inner() - }; + let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap(); - let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) { - Err(e) - } else { Ok(new_pubkey) }; + let blinding_factor = { + let mut sha = Sha256::engine(); + sha.input(&new_pubkey.serialize()[..]); + sha.input(&shared_secret); + Sha256::from_engine(sha).into_inner() + }; - let outgoing_packet = msgs::OnionPacket { - version: 0, - public_key, - hop_data: new_packet_data, - hmac: next_hop_hmac.clone(), - }; + let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) { + Err(e) + } else { Ok(new_pubkey) }; - let short_channel_id = match next_hop_data.format { - msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id, - msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, - msgs::OnionHopDataFormat::FinalNode { .. } => { - return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); - }, - }; + let outgoing_packet = msgs::OnionPacket { + version: 0, + public_key, + hop_data: new_packet_data, + hmac: next_hop_hmac.clone(), + }; - PendingHTLCStatus::Forward(PendingHTLCInfo { - routing: PendingHTLCRouting::Forward { - onion_packet: outgoing_packet, - short_channel_id, - }, - payment_hash: msg.payment_hash.clone(), - incoming_shared_secret: shared_secret, - amt_to_forward: next_hop_data.amt_to_forward, - outgoing_cltv_value: next_hop_data.outgoing_cltv_value, - }) + let short_channel_id = match next_hop_data.format { + msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id, + msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, + msgs::OnionHopDataFormat::FinalNode { .. 
} => { + return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); + }, }; + PendingHTLCStatus::Forward(PendingHTLCInfo { + routing: PendingHTLCRouting::Forward { + onion_packet: outgoing_packet, + short_channel_id, + }, + payment_hash: msg.payment_hash.clone(), + incoming_shared_secret: shared_secret, + amt_to_forward: next_hop_data.amt_to_forward, + outgoing_cltv_value: next_hop_data.outgoing_cltv_value, + }) + }; + channel_state = Some(self.channel_state.lock().unwrap()); if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info { // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel @@ -1519,15 +1748,23 @@ impl ChannelMana // short_channel_id is non-0 in any ::Forward. if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing { let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); - let forwarding_id = match id_option { - None => { // unknown_next_peer - return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]); - }, - Some(id) => id.clone(), - }; if let Some((err, code, chan_update)) = loop { + let forwarding_id = match id_option { + None => { // unknown_next_peer + break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); + }, + Some(id) => id.clone(), + }; + let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); + if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { + // Note that the behavior here should be identical to the above block - we + // should NOT reveal the existence or non-existence of a private channel if + // we don't allow forwards outbound over them. 
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); + } + // Note that we could technically not return an error yet here and just hope // that the connection is reestablished or monitor updated by the time we get // around to doing the actual forward, but better to fail early if we can and @@ -1539,7 +1776,9 @@ impl ChannelMana if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } - let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) }); + let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64) + .and_then(|prop_fee| { (prop_fee / 1000000) + .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) }); if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } @@ -1624,7 +1863,7 @@ impl ChannelMana cltv_expiry_delta: chan.get_cltv_expiry_delta(), htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(), htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()), - fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator), + fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(), fee_proportional_millionths: chan.get_fee_proportional_millionths(), excess_data: Vec::new(), }; @@ -1639,7 +1878,7 @@ impl ChannelMana } // Only public for testing, this should otherwise never be called direcly - pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32) -> Result<(), APIError> { + pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, mpp_id: MppId, keysend_preimage: &Option) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let prng_seed = self.keys_manager.get_secure_random_bytes(); let session_priv_bytes = self.keys_manager.get_secure_random_bytes(); @@ -1647,14 +1886,16 @@ impl ChannelMana let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?; - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?; + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?; if onion_utils::route_size_insane(&onion_payloads) { return Err(APIError::RouteError{err: "Route size too large considering onion data"}); } let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes)); + let mut pending_outbounds = 
self.pending_outbound_payments.lock().unwrap(); + let sessions = pending_outbounds.entry(mpp_id).or_insert(HashSet::new()); + assert!(sessions.insert(session_priv_bytes)); let err: Result<(), _> = loop { let mut channel_lock = self.channel_state.lock().unwrap(); @@ -1676,6 +1917,7 @@ impl ChannelMana path: path.clone(), session_priv: session_priv.clone(), first_hop_htlc_msat: htlc_msat, + mpp_id, }, onion_packet, &self.logger), channel_state, chan) } { Some((update_add, commitment_signed, monitor_update)) => { @@ -1756,6 +1998,10 @@ impl ChannelMana /// bit set (either as required or as available). If multiple paths are present in the Route, /// we assume the invoice had the basic_mpp feature set. pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result<(), PaymentSendFailure> { + self.send_payment_internal(route, payment_hash, payment_secret, None) + } + + fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option) -> Result<(), PaymentSendFailure> { if route.paths.len() < 1 { return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); } @@ -1765,9 +2011,13 @@ impl ChannelMana // for now more than 10 paths likely carries too much one-path failure. return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"})); } + if payment_secret.is_none() && route.paths.len() > 1 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()})); + } let mut total_value = 0; let our_node_id = self.get_our_node_id(); let mut path_errs = Vec::with_capacity(route.paths.len()); + let mpp_id = MppId(self.keys_manager.get_secure_random_bytes()); 'path_check: for path in route.paths.iter() { if path.len() < 1 || path.len() > 20 { path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"})); @@ -1789,7 +2039,7 @@ impl ChannelMana let cur_height = self.best_block.read().unwrap().height() + 1; let mut results = Vec::new(); for path in route.paths.iter() { - results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height)); + results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, mpp_id, &keysend_preimage)); } let mut has_ok = false; let mut has_err = false; @@ -1813,6 +2063,32 @@ impl ChannelMana } } + /// Send a spontaneous payment, which is a payment that does not require the recipient to have + /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify + /// the preimage, it must be a cryptographically secure random value that no intermediate node + /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will + /// never reach the recipient. + /// + /// See [`send_payment`] documentation for more details on the return value of this function. + /// + /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See + /// [`send_payment`] for more information about the risks of duplicate preimage usage. + /// + /// Note that `route` must have exactly one path. 
+ /// + /// [`send_payment`]: Self::send_payment + pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option) -> Result { + let preimage = match payment_preimage { + Some(p) => p, + None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()), + }; + let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner()); + match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) { + Ok(()) => Ok(payment_hash), + Err(e) => Err(e) + } + } + /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. fn funding_transaction_generated_intern, &Transaction) -> Result> @@ -2156,15 +2432,16 @@ impl ChannelMana // close channel and then send error message to peer. let counterparty_node_id = chan.get().get_counterparty_node_id(); let err: Result<(), _> = match e { - ChannelError::Ignore(_) => { + ChannelError::Ignore(_) | ChannelError::Warn(_) => { panic!("Stated return value requirements in send_commitment() were not met"); - }, + } ChannelError::Close(msg) => { log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); let (channel_id, mut channel) = chan.remove_entry(); if let Some(short_id) = channel.get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } + // ChannelClosed event is generated by handle_error for us. Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) }, ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } @@ -2198,9 +2475,17 @@ impl ChannelMana for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { - routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry }, - incoming_shared_secret, payment_hash, amt_to_forward, .. }, + routing, incoming_shared_secret, payment_hash, amt_to_forward, .. }, prev_funding_outpoint } => { + let (cltv_expiry, onion_payload) = match routing { + PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry } => + (incoming_cltv_expiry, OnionPayload::Invoice(payment_data)), + PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } => + (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage)), + _ => { + panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); + } + }; let claimable_htlc = ClaimableHTLC { prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, @@ -2209,8 +2494,8 @@ impl ChannelMana incoming_packet_shared_secret: incoming_shared_secret, }, value: amt_to_forward, - payment_data: payment_data.clone(), - cltv_expiry: incoming_cltv_expiry, + cltv_expiry, + onion_payload, }; macro_rules! 
fail_htlc { @@ -2239,10 +2524,38 @@ impl ChannelMana let mut payment_secrets = self.pending_inbound_payments.lock().unwrap(); match payment_secrets.entry(payment_hash) { hash_map::Entry::Vacant(_) => { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + match claimable_htlc.onion_payload { + OnionPayload::Invoice(_) => { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + }, + OnionPayload::Spontaneous(preimage) => { + match channel_state.claimable_htlcs.entry(payment_hash) { + hash_map::Entry::Vacant(e) => { + e.insert(vec![claimable_htlc]); + new_events.push(events::Event::PaymentReceived { + payment_hash, + amt: amt_to_forward, + purpose: events::PaymentPurpose::SpontaneousPayment(preimage), + }); + }, + hash_map::Entry::Occupied(_) => { + log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + } + } + } + } }, hash_map::Entry::Occupied(inbound_payment) => { + let payment_data = + if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload { + data.clone() + } else { + log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + continue + }; if inbound_payment.get().payment_secret != payment_data.payment_secret { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc); @@ -2254,15 +2567,27 @@ impl ChannelMana let mut total_value = 0; let htlcs = channel_state.claimable_htlcs.entry(payment_hash) .or_insert(Vec::new()); + if htlcs.len() == 1 { + if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + continue + } + } htlcs.push(claimable_htlc); for htlc in htlcs.iter() { total_value += htlc.value; - if htlc.payment_data.total_msat != payment_data.total_msat { - log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", - log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat); - total_value = msgs::MAX_VALUE_MSAT; + match &htlc.onion_payload { + OnionPayload::Invoice(htlc_payment_data) => { + if htlc_payment_data.total_msat != payment_data.total_msat { + log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", + log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat); + total_value = msgs::MAX_VALUE_MSAT; + } + if total_value >= msgs::MAX_VALUE_MSAT { break; } + }, + _ => unreachable!(), } - if total_value >= msgs::MAX_VALUE_MSAT { break; } } if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat { log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)", @@ -2273,10 +2598,12 @@ impl ChannelMana } else if total_value == payment_data.total_msat { 
new_events.push(events::Event::PaymentReceived { payment_hash, - payment_preimage: inbound_payment.get().payment_preimage, - payment_secret: payment_data.payment_secret, + purpose: events::PaymentPurpose::InvoicePayment { + payment_preimage: inbound_payment.get().payment_preimage, + payment_secret: payment_data.payment_secret, + user_payment_id: inbound_payment.get().user_payment_id, + }, amt: total_value, - user_payment_id: inbound_payment.get().user_payment_id, }); // Only ever generate at most one PaymentReceived // per registered payment_hash, even if it isn't @@ -2291,9 +2618,6 @@ impl ChannelMana }, }; }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); - }, HTLCForwardInfo::FailHTLC { .. } => { panic!("Got pending fail of our own HTLC"); } @@ -2347,48 +2671,160 @@ impl ChannelMana self.process_background_events(); } - /// If a peer is disconnected we mark any channels with that peer as 'disabled'. - /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate - /// to inform the network about the uselessness of these channels. + fn update_channel_fee(&self, short_to_id: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { + if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } + // If the feerate has decreased by less than half, don't bother + if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + return (true, NotifyOption::SkipPersist, Ok(())); + } + if !chan.is_live() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + return (true, NotifyOption::SkipPersist, Ok(())); + } + log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + + let mut retain_channel = true; + let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { + Ok(res) => Ok(res), + Err(e) => { + let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + if drop { retain_channel = false; } + Err(res) + } + }; + let ret_err = match res { + Ok(Some((update_fee, commitment_signed, monitor_update))) => { + if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id); + if drop { retain_channel = false; } + res + } else { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + Ok(()) + } + }, + Ok(None) => Ok(()), + Err(e) => Err(e), + }; + (retain_channel, NotifyOption::DoPersist, ret_err) + } + + #[cfg(fuzzing)] + /// In chanmon_consistency we want to sometimes do the channel fee updates done in + /// timer_tick_occurred, but 
we can't generate the disabled channel updates as it considers + /// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what + /// it wants to detect). Thus, we have a variant exposed here for its benefit. + pub fn maybe_update_chan_fees(&self) { + PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { + let mut should_persist = NotifyOption::SkipPersist; + + let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + + let mut handle_errors = Vec::new(); + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let pending_msg_events = &mut channel_state.pending_msg_events; + let short_to_id = &mut channel_state.short_to_id; + channel_state.by_id.retain(|chan_id, chan| { + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push(err); + } + retain_channel + }); + } + + should_persist + }); + } + + /// Performs actions which should happen on startup and roughly once per minute thereafter. /// - /// This method handles all the details, and must be called roughly once per minute. + /// This currently includes: + /// * Increasing or decreasing the on-chain feerate estimates for our outbound channels, + /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more + /// than a minute, informing the network that they should no longer attempt to route over + /// the channel. /// - /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call. + /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate + /// estimate fetches. 
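timer_tick_occurred drives the feerate test implemented in update_channel_fee earlier in this hunk, and only ever considers outbound, live channels. Distilled, the qualification check is a small predicate over the old and new sat-per-1000-weight values: increases always qualify, decreases only once the estimate has at least halved. The function name and bare u32 arguments below are illustrative only; in the crate the values come from chan.get_feerate() and the FeeEstimator.

/// Mirrors the check in update_channel_fee: returns whether a commitment fee
/// update from `current` to `new` (both sat per 1000 weight) is worth sending.
fn feerate_change_qualifies(current: u32, new: u32) -> bool {
    // Decreased (or unchanged) by less than half: not worth a new commitment tx.
    if new <= current && new * 2 > current {
        return false;
    }
    true
}

#[test]
fn feerate_change_examples() {
    assert!(feerate_change_qualifies(1000, 1500)); // any increase qualifies
    assert!(!feerate_change_qualifies(1000, 900)); // a small decrease does not
    assert!(feerate_change_qualifies(1000, 500));  // dropped by at least half: update
}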
pub fn timer_tick_occurred(&self) { PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = NotifyOption::SkipPersist; if self.process_background_events() { should_persist = NotifyOption::DoPersist; } - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - for (_, chan) in channel_state.by_id.iter_mut() { - match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), - ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Disabled); - }, - ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Enabled); - }, - _ => {}, - } + let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + + let mut handle_errors = Vec::new(); + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let pending_msg_events = &mut channel_state.pending_msg_events; + let short_to_id = &mut channel_state.short_to_id; + channel_state.by_id.retain(|chan_id, chan| { + let counterparty_node_id = chan.get_counterparty_node_id(); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push((err, counterparty_node_id)); + } + if !retain_channel { return false; } + + if let Err(e) = chan.timer_check_closing_negotiation_progress() { + let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + handle_errors.push((Err(err), chan.get_counterparty_node_id())); + if needs_close { return false; } + } + + match chan.channel_update_status() { + ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), + ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), + ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), + ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), + ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + 
should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Disabled); + }, + ChannelUpdateStatus::EnabledStaged if chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Enabled); + }, + _ => {}, + } + + true + }); } + for (err, counterparty_node_id) in handle_errors.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); + } should_persist }); } @@ -2439,22 +2875,28 @@ impl ChannelMana self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); }, - HTLCSource::OutboundRoute { session_priv, .. } => { - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { - self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { - payment_hash, - rejected_by_dest: false, -#[cfg(test)] - error_code: None, -#[cfg(test)] - error_data: None, + HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => { + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) { + if sessions.get_mut().remove(&session_priv_bytes) { + self.pending_events.lock().unwrap().push( + events::Event::PaymentFailed { + payment_hash, + rejected_by_dest: false, + network_update: None, + all_paths_failed: sessions.get().len() == 0, + #[cfg(test)] + error_code: None, + #[cfg(test)] + error_data: None, + } + ); + if sessions.get().len() == 0 { + sessions.remove(); } - ) + } } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); } @@ -2479,12 +2921,21 @@ impl ChannelMana // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { - HTLCSource::OutboundRoute { ref path, session_priv, .. } => { - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { + HTLCSource::OutboundRoute { ref path, session_priv, mpp_id, .. 
} => { + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + let mut all_paths_failed = false; + if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) { + if !sessions.get_mut().remove(&session_priv_bytes) { + log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); + return; + } + if sessions.get().len() == 0 { + all_paths_failed = true; + sessions.remove(); + } + } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; } @@ -2493,23 +2944,18 @@ impl ChannelMana match &onion_error { &HTLCFailReason::LightningError { ref err } => { #[cfg(test)] - let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); #[cfg(not(test))] - let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); // TODO: If we decided to blame ourselves (or one of our channels) in // process_onion_failure we should close that channel as it implies our // next-hop is needlessly blaming us! - if let Some(update) = channel_update { - self.channel_state.lock().unwrap().pending_msg_events.push( - events::MessageSendEvent::PaymentFailureNetworkUpdate { - update, - } - ); - } self.pending_events.lock().unwrap().push( events::Event::PaymentFailed { payment_hash: payment_hash.clone(), rejected_by_dest: !payment_retryable, + network_update, + all_paths_failed, #[cfg(test)] error_code: onion_error_code, #[cfg(test)] @@ -2524,7 +2970,7 @@ impl ChannelMana ref data, .. } => { // we get a fail_malformed_htlc from the first hop - // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary + // TODO: We'd like to generate a NetworkUpdate for temporary // failures here, but that would be insufficient as get_route // generally ignores its view of our own channels as we provide them via // ChannelDetails. @@ -2534,6 +2980,8 @@ impl ChannelMana events::Event::PaymentFailed { payment_hash: payment_hash.clone(), rejected_by_dest: path.len() == 1, + network_update: None, + all_paths_failed, #[cfg(test)] error_code: Some(*failure_code), #[cfg(test)] @@ -2634,16 +3082,22 @@ impl ChannelMana HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }); } else { match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) { - Err(Some(e)) => { - if let msgs::ErrorAction::IgnoreError = e.1.err.action { + ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => { + if let msgs::ErrorAction::IgnoreError = err.err.action { // We got a temporary failure updating monitor, but will claim the // HTLC when the monitor updating is restored (or on chain). 
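The fail-path bookkeeping above boils down to: look up the payment's session set by MppId, drop this path's session key, and report all_paths_failed once the set empties, so the PaymentFailed event can say whether another path of the same payment is still in flight. A minimal standalone sketch follows, with plain 32-byte arrays standing in for the crate's MppId newtype and per-path session keys.

use std::collections::{HashMap, HashSet};

type MppId = [u8; 32];      // stand-in for channelmanager::MppId
type SessionPriv = [u8; 32];

/// Returns None for an unknown/duplicate failure (nothing to report), otherwise
/// Some(all_paths_failed) for the PaymentFailed event.
fn note_path_failed(
    outbounds: &mut HashMap<MppId, HashSet<SessionPriv>>,
    mpp_id: MppId,
    session_priv: SessionPriv,
) -> Option<bool> {
    let sessions = outbounds.get_mut(&mpp_id)?;
    if !sessions.remove(&session_priv) {
        return None; // this path was already failed or fulfilled
    }
    let all_paths_failed = sessions.is_empty();
    if all_paths_failed {
        // Nothing left in flight for this payment; forget it entirely.
        outbounds.remove(&mpp_id);
    }
    Some(all_paths_failed)
}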
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err); + log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); claimed_any_htlcs = true; - } else { errs.push(e); } + } else { errs.push((pk, err)); } }, - Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"), - Ok(()) => claimed_any_htlcs = true, + ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"), + ClaimFundsFromHop::DuplicateClaim => { + // While we should never get here in most cases, if we do, it likely + // indicates that the HTLC was timed out some time ago and is no longer + // available to be claimed. Thus, it does not make sense to set + // `claimed_any_htlcs`. + }, + ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true, } } } @@ -2661,103 +3115,132 @@ impl ChannelMana } else { false } } - fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> { + fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { //TODO: Delay the claimed_funds relaying just like we do outbound relay! let channel_state = &mut **channel_state_lock; let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) { Some(chan_id) => chan_id.clone(), None => { - return Err(None) + return ClaimFundsFromHop::PrevHopForceClosed } }; if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { - let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { - Ok((msgs, monitor_option)) => { - if let Some(monitor_update) = monitor_option { + Ok(msgs_monitor_option) => { + if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - if was_frozen_for_monitor { - assert!(msgs.is_none()); - } else { - return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err()))); - } + log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug }, + "Failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, e); + return ClaimFundsFromHop::MonitorUpdateFail( + chan.get().get_counterparty_node_id(), + handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), + Some(htlc_value_msat) + ); } + if let Some((msg, commitment_signed)) = msgs { + log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", + log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: vec![msg], + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + } + }); + } + return 
ClaimFundsFromHop::Success(htlc_value_msat); + } else { + return ClaimFundsFromHop::DuplicateClaim; } - if let Some((msg, commitment_signed)) = msgs { - log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", - log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: vec![msg], - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - } - }); - } - return Ok(()) }, - Err(e) => { - // TODO: Do something with e? - // This should only occur if we are claiming an HTLC at the same time as the - // HTLC is being failed (eg because a block is being connected and this caused - // an HTLC to time out). This should, of course, only occur if the user is the - // one doing the claiming (as it being a part of a peer claim would imply we're - // about to lose funds) and only if the lock in claim_funds was dropped as a - // previous HTLC was failed (thus not for an MPP payment). - debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e); - return Err(None) + Err((e, monitor_update)) => { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info }, + "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", + payment_preimage, e); + } + let counterparty_node_id = chan.get().get_counterparty_node_id(); + let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id); + if drop { + chan.remove_entry(); + } + return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); }, } } else { unreachable!(); } } - fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage) { + fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool) { match source { - HTLCSource::OutboundRoute { session_priv, .. } => { + HTLCSource::OutboundRoute { session_priv, mpp_id, .. 
} => { mem::drop(channel_state_lock); - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PaymentSent { - payment_preimage - }); + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + let found_payment = if let Some(mut sessions) = outbounds.remove(&mpp_id) { + sessions.remove(&session_priv_bytes) + } else { false }; + if found_payment { + self.pending_events.lock().unwrap().push( + events::Event::PaymentSent { payment_preimage } + ); } else { log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0)); } }, HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; - if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) { - Ok(()) => Ok(()), - Err(None) => { - let preimage_update = ChannelMonitorUpdate { - update_id: CLOSED_CHANNEL_UPDATE_ID, - updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: payment_preimage.clone(), - }], - }; - // We update the ChannelMonitor on the backward link, after - // receiving an offchain preimage event from the forward link (the - // event being update_fulfill_htlc). - if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) { - log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", - payment_preimage, e); - } - Ok(()) - }, - Err(Some(res)) => Err(res), - } { - mem::drop(channel_state_lock); - let res: Result<(), _> = Err(err); - let _ = handle_error!(self, res, counterparty_node_id); + let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage); + let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true }; + let htlc_claim_value_msat = match res { + ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt, + ClaimFundsFromHop::Success(amt) => Some(amt), + _ => None, + }; + if let ClaimFundsFromHop::PrevHopForceClosed = res { + let preimage_update = ChannelMonitorUpdate { + update_id: CLOSED_CHANNEL_UPDATE_ID, + updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: payment_preimage.clone(), + }], + }; + // We update the ChannelMonitor on the backward link, after + // receiving an offchain preimage event from the forward link (the + // event being update_fulfill_htlc). + if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) { + log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, e); + } + // Note that we do *not* set `claimed_htlc` to false here. In fact, this + // totally could be a duplicate claim, but we have no way of knowing + // without interrogating the `ChannelMonitor` we've provided the above + // update to. Instead, we simply document in `PaymentForwarded` that this + // can happen. 
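Around this point the claim result is reduced to the two facts claim_funds_internal actually needs: whether the HTLC was (or will eventually be) claimed, and for how many msat, so the PaymentForwarded event pushed just below can report the routing fee as inbound-minus-outbound value. A condensed restatement, with a local enum standing in for ClaimFundsFromHop and the error payloads omitted:

enum ClaimResult {
    PrevHopForceClosed,
    MonitorUpdateFail(Option<u64>), // claim value, if the fulfill was fetched before the failure
    Success(u64),
    DuplicateClaim,
}

/// The msat value we claimed, if known; a duplicate claim or a force-closed
/// previous hop leaves nothing to report.
fn claimed_value_msat(res: &ClaimResult) -> Option<u64> {
    match res {
        ClaimResult::Success(amt) => Some(*amt),
        ClaimResult::MonitorUpdateFail(amt_opt) => *amt_opt,
        ClaimResult::PrevHopForceClosed | ClaimResult::DuplicateClaim => None,
    }
}

/// Routing fee earned on a forwarded HTLC: the inbound HTLC always covers the
/// outbound value plus our fee, so the difference is what we just earned.
fn fee_earned_msat(claimed_msat: Option<u64>, forwarded_msat: u64) -> Option<u64> {
    claimed_msat.map(|claimed| claimed - forwarded_msat)
}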
+ } + mem::drop(channel_state_lock); + if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res { + let result: Result<(), _> = Err(err); + let _ = handle_error!(self, result, pk); + } + + if claimed_htlc { + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) + } else { None }; + + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::PaymentForwarded { + fee_earned_msat, + claim_from_onchain_tx: from_onchain, + }); + } } }, } @@ -2831,7 +3314,7 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); } - let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration) + let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration) .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?; let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -2857,7 +3340,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan); + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, &their_features), channel_state, chan); (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) @@ -2994,7 +3477,8 @@ impl ChannelMana } fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { - let (mut dropped_htlcs, chan_option) = { + let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>; + let result: Result<(), _> = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -3003,25 +3487,36 @@ impl ChannelMana if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry); + + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } + + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); + dropped_htlcs = htlcs; + + // Update the monitor with the shutdown script if necessary. 
+ if let Some(monitor_update) = monitor_update { + if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { + let (result, is_permanent) = + handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key()); + if is_permanent { + remove_channel!(channel_state, chan_entry); + break result; + } + } + } + if let Some(msg) = shutdown { channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: counterparty_node_id.clone(), - msg, - }); - } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), + node_id: *counterparty_node_id, msg, }); } - if chan_entry.get().is_shutdown() { - if let Some(short_id) = chan_entry.get().get_short_channel_id() { - channel_state.short_to_id.remove(&short_id); - } - (dropped_htlcs, Some(chan_entry.remove_entry().1)) - } else { (dropped_htlcs, None) } + + break Ok(()); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -3029,14 +3524,8 @@ impl ChannelMana for htlc_source in dropped_htlcs.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } - if let Some(chan) = chan_option { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - } + + let _ = handle_error!(self, result, *counterparty_node_id); Ok(()) } @@ -3082,6 +3571,7 @@ impl ChannelMana msg: update }); } + self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure }); } Ok(()) } @@ -3106,33 +3596,34 @@ impl ChannelMana } let create_pending_htlc_status = |chan: &Channel, pending_forward_info: PendingHTLCStatus, error_code: u16| { - // Ensure error_code has the UPDATE flag set, since by default we send a - // channel update along as part of failing the HTLC. - assert!((error_code & 0x1000) != 0); // If the update_add is completely bogus, the call will Err and we will close, // but if we've sent a shutdown and they haven't acknowledged it yet, we just // want to reject the new HTLC and fail it backwards instead of forwarding. match pending_forward_info { PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. 
}) => { - let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) { - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{ - let mut res = Vec::with_capacity(8 + 128); - // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791 - res.extend_from_slice(&byte_utils::be16_to_array(0)); - res.extend_from_slice(&upd.encode_with_len()[..]); - res - }[..]) + let reason = if (error_code & 0x1000) != 0 { + if let Ok(upd) = self.get_channel_update_for_unicast(chan) { + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{ + let mut res = Vec::with_capacity(8 + 128); + // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791 + res.extend_from_slice(&byte_utils::be16_to_array(0)); + res.extend_from_slice(&upd.encode_with_len()[..]); + res + }[..]) + } else { + // The only case where we'd be unable to + // successfully get a channel update is if the + // channel isn't in the fully-funded state yet, + // implying our counterparty is trying to route + // payments over the channel back to themselves + // (because no one else should know the short_id + // is a lightning channel yet). We should have + // no problem just calling this + // unknown_next_peer (0x4000|10). + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[]) + } } else { - // The only case where we'd be unable to - // successfully get a channel update is if the - // channel isn't in the fully-funded state yet, - // implying our counterparty is trying to route - // payments over the channel back to themselves - // (cause no one else should know the short_id - // is a lightning channel yet). We should have - // no problem just calling this - // unknown_next_peer (0x4000|10). 
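The rewritten branch above attaches a channel_update to the failure data only when the error code actually carries the UPDATE flag (0x1000), and falls back to unknown_next_peer when no broadcastable update exists yet (the channel is not fully funded, so the sender can only be probing its own channel). A compact restatement of just that selection; encoded_update stands in for upd.encode_with_len() and is a hypothetical parameter, not a crate helper.

const UPDATE_FLAG: u16 = 0x1000;
const UNKNOWN_NEXT_PEER: u16 = 0x4000 | 10;

/// Choose the (failure_code, failure_data) pair used to build the first-hop
/// failure packet for a rejected update_add_htlc.
fn failure_code_and_data(error_code: u16, encoded_update: Option<Vec<u8>>) -> (u16, Vec<u8>) {
    if error_code & UPDATE_FLAG == 0 {
        // No UPDATE flag: no channel_update is attached to this failure.
        return (error_code, Vec::new());
    }
    match encoded_update {
        Some(upd) => {
            let mut data = Vec::with_capacity(2 + upd.len());
            // Two zero bytes first; the exact meaning of this field is still
            // underspecified upstream (see the TODO above).
            data.extend_from_slice(&0u16.to_be_bytes());
            data.extend_from_slice(&upd);
            (error_code, data)
        }
        // No update available: the channel is not fully funded/announced yet, so
        // fail with unknown_next_peer and empty data instead.
        None => (UNKNOWN_NEXT_PEER, Vec::new()),
    }
}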
- onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[]) + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[]) }; let msg = msgs::UpdateFailHTLC { channel_id: msg.channel_id, @@ -3153,7 +3644,7 @@ impl ChannelMana fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); - let htlc_source = { + let (htlc_source, forwarded_htlc_value) = { let channel_state = &mut *channel_lock; match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { @@ -3165,7 +3656,7 @@ impl ChannelMana hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } }; - self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone()); + self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false); Ok(()) } @@ -3211,8 +3702,8 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) = - match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) { + let (revoke_and_ack, commitment_signed, monitor_update) = + match chan.get_mut().commitment_signed(&msg, &self.logger) { Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); @@ -3224,7 +3715,6 @@ impl ChannelMana }; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); - //TODO: Rebroadcast closing_signed if present on monitor update restoration } channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), @@ -3243,12 +3733,6 @@ impl ChannelMana }, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } Ok(()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3268,6 +3752,7 @@ impl ChannelMana match channel_state.forward_htlcs.entry(match forward_info.routing { PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, PendingHTLCRouting::Receive { .. } => 0, + PendingHTLCRouting::ReceiveKeysend { .. 
} => 0, }) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint, @@ -3303,12 +3788,12 @@ impl ChannelMana break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) = - break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan); + let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) = + break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); htlcs_to_fail = htlcs_to_fail_in; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { if was_frozen_for_monitor { - assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); + assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); } else { if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) { @@ -3322,12 +3807,6 @@ impl ChannelMana updates, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap())) }, hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3472,73 +3951,17 @@ impl ChannelMana Ok(()) } - /// Begin Update fee process. Allowed only on an outbound channel. - /// If successful, will generate a UpdateHTLCs event, so you should probably poll - /// PeerManager::process_events afterwards. - /// Note: This API is likely to change! 
- /// (C-not exported) Cause its doc(hidden) anyway - #[doc(hidden)] - pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let counterparty_node_id; - let err: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - - match channel_state.by_id.entry(channel_id) { - hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}), - hash_map::Entry::Occupied(mut chan) => { - if !chan.get().is_outbound() { - return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()}); - } - if chan.get().is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.get().is_live() { - return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()}); - } - counterparty_node_id = chan.get().get_counterparty_node_id(); - if let Some((update_fee, commitment_signed, monitor_update)) = - break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan) - { - if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - unimplemented!(); - } - log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, - }); - } - }, - } - return Ok(()) - }; - - match handle_error!(self, err, counterparty_node_id) { - Ok(_) => unreachable!(), - Err(e) => { Err(APIError::APIMisuseError { err: e.err })} - } - } - /// Process pending events from the `chain::Watch`, returning whether any events were processed. fn process_pending_monitor_events(&self) -> bool { let mut failed_channels = Vec::new(); - let pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); + let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); - for monitor_event in pending_monitor_events { + for monitor_event in pending_monitor_events.drain(..) 
{ match monitor_event { MonitorEvent::HTLCEvent(htlc_update) => { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); - self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); + self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); @@ -3560,6 +3983,7 @@ impl ChannelMana msg: update }); } + self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxBroadcasted }); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { @@ -3621,13 +4045,14 @@ impl ChannelMana Err(e) => { let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + // ChannelClosed event is generated by handle_error for us !close_channel } } }); } - let has_update = has_monitor_update || !failed_htlcs.is_empty(); + let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); for (failures, channel_id) in failed_htlcs.drain(..) { self.fail_holding_cell_htlcs(failures, channel_id); } @@ -3639,6 +4064,70 @@ impl ChannelMana has_update } + /// Check whether any channels have finished removing all pending updates after a shutdown + /// exchange and can now send a closing_signed. + /// Returns whether any closing_signed messages were generated. + fn maybe_generate_initial_closing_signed(&self) -> bool { + let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); + let mut has_update = false; + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let by_id = &mut channel_state.by_id; + let short_to_id = &mut channel_state.short_to_id; + let pending_msg_events = &mut channel_state.pending_msg_events; + + by_id.retain(|channel_id, chan| { + match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { + Ok((msg_opt, tx_opt)) => { + if let Some(msg) = msg_opt { + has_update = true; + pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: chan.get_counterparty_node_id(), msg, + }); + } + if let Some(tx) = tx_opt { + // We're done with this channel. We got a closing_signed and sent back + // a closing_signed with a closing transaction to broadcast. 
+ if let Some(short_id) = chan.get_short_channel_id() { + short_to_id.remove(&short_id); + } + + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + + if let Ok(mut pending_events_lock) = self.pending_events.lock() { + pending_events_lock.push(events::Event::ChannelClosed { + channel_id: *channel_id, + reason: ClosureReason::HolderForceClosed + }); + } + + log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); + self.tx_broadcaster.broadcast_transaction(&tx); + false + } else { true } + }, + Err(e) => { + has_update = true; + let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); + handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + !close_channel + } + } + }); + } + + for (counterparty_node_id, err) in handle_errors.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); + } + + has_update + } + /// Handle a list of channel failures during a block_connected or block_disconnected call, /// pushing the channel monitor update (if any) to the background events queue and removing the /// Channel object. @@ -3723,7 +4212,7 @@ impl ChannelMana /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This /// method may return an Err if another payment with the same payment_hash is still pending. /// - /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to + /// `user_payment_id` will be provided back in [`PaymentPurpose::InvoicePayment::user_payment_id`] events to /// allow tracking of which events correspond with which calls to this and /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply /// copied to events and otherwise ignored. 
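Because PaymentReceived now reports a PaymentPurpose instead of bare preimage/secret fields, an event handler has to branch on how the payment arrived to recover user_payment_id (invoice payments) or the payer-supplied preimage (keysend). A rough sketch of that match, assuming the event types introduced in this diff are in scope via the lightning crate; the actual claim and bookkeeping logic is left to the application.

use lightning::util::events::{Event, PaymentPurpose};

fn handle_payment_received(event: &Event) {
    if let Event::PaymentReceived { payment_hash, amt, purpose } = event {
        match purpose {
            PaymentPurpose::InvoicePayment { payment_secret, user_payment_id, .. } => {
                // An invoice we issued: correlate with our records via user_payment_id,
                // check amt and payment_secret, then claim with the known preimage.
                let _ = (payment_hash, amt, payment_secret, user_payment_id);
            },
            PaymentPurpose::SpontaneousPayment(preimage) => {
                // Keysend: the payer chose the preimage and shipped it in the onion,
                // so the funds can be claimed with it immediately.
                let _ = (payment_hash, amt, preimage);
            },
        }
    }
}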
It may be used to correlate PaymentReceived events @@ -3757,7 +4246,7 @@ impl ChannelMana /// /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`PaymentReceived`]: events::Event::PaymentReceived - /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id + /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result { self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id) } @@ -3765,7 +4254,7 @@ impl ChannelMana #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))] pub fn get_and_clear_pending_events(&self) -> Vec { let events = core::cell::RefCell::new(Vec::new()); - let event_handler = |event| events.borrow_mut().push(event); + let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone()); self.process_pending_events(&event_handler); events.into_inner() } @@ -3792,6 +4281,9 @@ impl MessageSend if self.check_free_holding_cells() { result = NotifyOption::DoPersist; } + if self.maybe_generate_initial_closing_signed() { + result = NotifyOption::DoPersist; + } let mut pending_events = Vec::new(); let mut channel_state = self.channel_state.lock().unwrap(); @@ -3833,13 +4325,13 @@ where result = NotifyOption::DoPersist; } - let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]); + let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]); if !pending_events.is_empty() { result = NotifyOption::DoPersist; } for event in pending_events.drain(..) { - handler.handle_event(event); + handler.handle_event(&event); } result @@ -4034,6 +4526,7 @@ where msg: update }); } + self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxBroadcasted }); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { msg: e }, @@ -4096,6 +4589,12 @@ where let guard = mtx.lock().unwrap(); *guard } + + /// Gets the latest best block which was connected either via the [`chain::Listen`] or + /// [`chain::Confirm`] interfaces. + pub fn current_best_block(&self) -> BestBlock { + self.best_block.read().unwrap().clone() + } } impl @@ -4218,6 +4717,7 @@ impl msg: update }); } + self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer }); false } else { true @@ -4232,6 +4732,7 @@ impl if let Some(short_id) = chan.get_short_channel_id() { short_to_id.remove(&short_id); } + self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer }); return false; } else { no_channels_remain = false; @@ -4258,7 +4759,6 @@ impl &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true, &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, &events::MessageSendEvent::SendShortIdsQuery { .. 
} => false, &events::MessageSendEvent::SendReplyChannelRange { .. } => false, @@ -4321,14 +4821,14 @@ impl if msg.channel_id == [0; 32] { for chan in self.list_channels() { - if chan.remote_network_id == *counterparty_node_id { + if chan.counterparty.node_id == *counterparty_node_id { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id)); + let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data)); } } } else { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id)); + let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data)); } } } @@ -4415,7 +4915,11 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (1, Receive) => { (0, payment_data, required), (2, incoming_cltv_expiry, required), - } + }, + (2, ReceiveKeysend) => { + (0, payment_preimage, required), + (2, incoming_cltv_expiry, required), + }, ;); impl_writeable_tlv_based!(PendingHTLCInfo, { @@ -4442,21 +4946,118 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (6, incoming_packet_shared_secret, required) }); -impl_writeable_tlv_based!(ClaimableHTLC, { - (0, prev_hop, required), - (2, value, required), - (4, payment_data, required), - (6, cltv_expiry, required), -}); +impl Writeable for ClaimableHTLC { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + let payment_data = match &self.onion_payload { + OnionPayload::Invoice(data) => Some(data.clone()), + _ => None, + }; + let keysend_preimage = match self.onion_payload { + OnionPayload::Invoice(_) => None, + OnionPayload::Spontaneous(preimage) => Some(preimage.clone()), + }; + write_tlv_fields! + (writer, + { + (0, self.prev_hop, required), (2, self.value, required), + (4, payment_data, option), (6, self.cltv_expiry, required), + (8, keysend_preimage, option), + }); + Ok(()) + } +} -impl_writeable_tlv_based_enum!(HTLCSource, - (0, OutboundRoute) => { - (0, session_priv, required), - (2, first_hop_htlc_msat, required), - (4, path, vec_type), - }, ; - (1, PreviousHopData) -); +impl Readable for ClaimableHTLC { + fn read(reader: &mut R) -> Result { + let mut prev_hop = ::util::ser::OptionDeserWrapper(None); + let mut value = 0; + let mut payment_data: Option = None; + let mut cltv_expiry = 0; + let mut keysend_preimage: Option = None; + read_tlv_fields! 
+ (reader, + { + (0, prev_hop, required), (2, value, required), + (4, payment_data, option), (6, cltv_expiry, required), + (8, keysend_preimage, option) + }); + let onion_payload = match keysend_preimage { + Some(p) => { + if payment_data.is_some() { + return Err(DecodeError::InvalidValue) + } + OnionPayload::Spontaneous(p) + }, + None => { + if payment_data.is_none() { + return Err(DecodeError::InvalidValue) + } + OnionPayload::Invoice(payment_data.unwrap()) + }, + }; + Ok(Self { + prev_hop: prev_hop.0.unwrap(), + value, + onion_payload, + cltv_expiry, + }) + } +} + +impl Readable for HTLCSource { + fn read(reader: &mut R) -> Result { + let id: u8 = Readable::read(reader)?; + match id { + 0 => { + let mut session_priv: ::util::ser::OptionDeserWrapper = ::util::ser::OptionDeserWrapper(None); + let mut first_hop_htlc_msat: u64 = 0; + let mut path = Some(Vec::new()); + let mut mpp_id = None; + read_tlv_fields!(reader, { + (0, session_priv, required), + (1, mpp_id, option), + (2, first_hop_htlc_msat, required), + (4, path, vec_type), + }); + if mpp_id.is_none() { + // For backwards compat, if there was no mpp_id written, use the session_priv bytes + // instead. + mpp_id = Some(MppId(*session_priv.0.unwrap().as_ref())); + } + Ok(HTLCSource::OutboundRoute { + session_priv: session_priv.0.unwrap(), + first_hop_htlc_msat: first_hop_htlc_msat, + path: path.unwrap(), + mpp_id: mpp_id.unwrap(), + }) + } + 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), + _ => Err(DecodeError::UnknownRequiredFeature), + } + } +} + +impl Writeable for HTLCSource { + fn write(&self, writer: &mut W) -> Result<(), ::io::Error> { + match self { + HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, mpp_id } => { + 0u8.write(writer)?; + let mpp_id_opt = Some(mpp_id); + write_tlv_fields!(writer, { + (0, session_priv, required), + (1, mpp_id_opt, option), + (2, first_hop_htlc_msat, required), + (4, path, vec_type), + }); + } + HTLCSource::PreviousHopData(ref field) => { + 1u8.write(writer)?; + field.write(writer)?; + } + } + Ok(()) + } +} impl_writeable_tlv_based_enum!(HTLCFailReason, (0, LightningError) => { @@ -4496,7 +5097,7 @@ impl Writeable f F::Target: FeeEstimator, L::Target: Logger, { - fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { let _consistency_lock = self.total_consistency_lock.write().unwrap(); write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION); @@ -4577,12 +5178,21 @@ impl Writeable f } let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); - (pending_outbound_payments.len() as u64).write(writer)?; - for session_priv in pending_outbound_payments.iter() { - session_priv.write(writer)?; + // For backwards compat, write the session privs and their total length. 
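On the ClaimableHTLC read path above, exactly one of the legacy payment_data TLV and the new keysend_preimage TLV may be present; both or neither is treated as a corrupt HTLC and rejected with InvalidValue. The same rule as a standalone helper, using local stand-in types since the real OnionPayload and DecodeError live in the crate:

struct InvoiceData(Vec<u8>);   // stand-in for msgs::FinalOnionHopData
struct Preimage([u8; 32]);     // stand-in for PaymentPreimage

enum OnionPayload {
    Invoice(InvoiceData),
    Spontaneous(Preimage),
}

#[derive(Debug)]
struct InvalidValue;

/// Exactly one of the two optional TLVs must have been read back.
fn onion_payload_from_tlvs(
    payment_data: Option<InvoiceData>,
    keysend_preimage: Option<Preimage>,
) -> Result<OnionPayload, InvalidValue> {
    match (payment_data, keysend_preimage) {
        (Some(data), None) => Ok(OnionPayload::Invoice(data)),
        (None, Some(preimage)) => Ok(OnionPayload::Spontaneous(preimage)),
        // Both present (ambiguous) or neither present (nothing to claim with).
        _ => Err(InvalidValue),
    }
}

The serialization change just below applies the matching dual-format idea to pending_outbound_payments: the legacy flat list of session keys is written first so old readers keep working, and the full MppId-keyed map goes into an explicit TLV field that new readers prefer when present.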
+ let mut num_pending_outbounds_compat: u64 = 0; + for (_, outbounds) in pending_outbound_payments.iter() { + num_pending_outbounds_compat += outbounds.len() as u64; + } + num_pending_outbounds_compat.write(writer)?; + for (_, outbounds) in pending_outbound_payments.iter() { + for outbound in outbounds.iter() { + outbound.write(writer)?; + } } - write_tlv_fields!(writer, {}); + write_tlv_fields!(writer, { + (1, pending_outbound_payments, required), + }); Ok(()) } @@ -4692,7 +5302,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> F::Target: FeeEstimator, L::Target: Logger, { - fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result { + fn read(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result { let (blockhash, chan_manager) = <(BlockHash, ChannelManager)>::read(reader, args)?; Ok((blockhash, Arc::new(chan_manager))) } @@ -4706,7 +5316,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> F::Target: FeeEstimator, L::Target: Logger, { - fn read(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result { + fn read(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); let genesis_hash: BlockHash = Readable::read(reader)?; @@ -4719,6 +5329,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut channel_closures = Vec::new(); for _ in 0..channel_count { let mut channel: Channel = Channel::read(reader, &args.keys_manager)?; let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?; @@ -4742,9 +5353,17 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() { // But if the channel is behind of the monitor, close the channel: + log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); + log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); + log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", + log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id()); let (_, mut new_failed_htlcs) = channel.force_shutdown(true); failed_htlcs.append(&mut new_failed_htlcs); monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger); + channel_closures.push(events::Event::ChannelClosed { + channel_id: channel.channel_id(), + reason: ClosureReason::OutdatedChanMan + }); } else { if let Some(short_channel_id) = channel.get_short_channel_id() { short_to_id.insert(short_channel_id, channel.channel_id()); @@ -4831,19 +5450,31 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } - let pending_outbound_payments_count: u64 = Readable::read(reader)?; - let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32)); - 
for _ in 0..pending_outbound_payments_count { - if !pending_outbound_payments.insert(Readable::read(reader)?) { - return Err(DecodeError::InvalidValue); - } + let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; + let mut pending_outbound_payments_compat: HashMap> = + HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); + for _ in 0..pending_outbound_payments_count_compat { + let session_priv = Readable::read(reader)?; + if pending_outbound_payments_compat.insert(MppId(session_priv), [session_priv].iter().cloned().collect()).is_some() { + return Err(DecodeError::InvalidValue) + }; } - read_tlv_fields!(reader, {}); + let mut pending_outbound_payments = None; + read_tlv_fields!(reader, { + (1, pending_outbound_payments, option), + }); + if pending_outbound_payments.is_none() { + pending_outbound_payments = Some(pending_outbound_payments_compat); + } let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes()); + if !channel_closures.is_empty() { + pending_events_read.append(&mut channel_closures); + } + let channel_manager = ChannelManager { genesis_hash, fee_estimator: args.fee_estimator, @@ -4860,7 +5491,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> pending_msg_events: Vec::new(), }), pending_inbound_payments: Mutex::new(pending_inbound_payments), - pending_outbound_payments: Mutex::new(pending_outbound_payments), + pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), our_network_key: args.keys_manager.get_node_secret(), our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()), @@ -4894,17 +5525,28 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> #[cfg(test)] mod tests { - use ln::channelmanager::PersistenceNotifier; - use std::sync::Arc; - use core::sync::atomic::{AtomicBool, Ordering}; - use std::thread; + use bitcoin::hashes::Hash; + use bitcoin::hashes::sha256::Hash as Sha256; use core::time::Duration; + use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; + use ln::channelmanager::{MppId, PaymentSendFailure}; + use ln::features::{InitFeatures, InvoiceFeatures}; use ln::functional_test_utils::*; - use ln::features::InitFeatures; + use ln::msgs; use ln::msgs::ChannelMessageHandler; + use routing::router::{get_keysend_route, get_route}; + use util::errors::APIError; + use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; + use util::test_utils; + #[cfg(feature = "std")] #[test] fn test_wait_timeout() { + use ln::channelmanager::PersistenceNotifier; + use sync::Arc; + use core::sync::atomic::{AtomicBool, Ordering}; + use std::thread; + let persistence_notifier = Arc::new(PersistenceNotifier::new()); let thread_notifier = Arc::clone(&persistence_notifier); @@ -4954,6 +5596,12 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + // All nodes start with a persistable update pending as `create_network` connects each node + // with all other nodes to make most tests simpler. 
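Returning to the deserialization change above: the reader first consumes the legacy flat list of session privs, turning each one into a single-entry map keyed by `MppId(session_priv)`, then reads the optional TLV map written by newer versions and prefers it when present; any channels force-closed as stale during the read surface as `ChannelClosed` events appended to the pending events. A rough, self-contained sketch of the selection logic, with plain byte arrays standing in for `MppId` and the session keys:

use std::collections::{HashMap, HashSet};

fn select_outbound_payments(
    legacy_session_privs: Vec<[u8; 32]>,
    tlv_map: Option<HashMap<[u8; 32], HashSet<[u8; 32]>>>,
) -> HashMap<[u8; 32], HashSet<[u8; 32]>> {
    // Newer serializations carry the full map as TLV type 1; use it when present.
    if let Some(map) = tlv_map {
        return map;
    }
    // Otherwise rebuild one single-element entry per legacy session_priv, mirroring the
    // MppId-from-session_priv fallback used when reading HTLCSource.
    let mut map = HashMap::new();
    for sp in legacy_session_privs {
        map.insert(sp, [sp].iter().cloned().collect());
    }
    map
}

The asserts that follow first drain the persistable-update flag that `create_network` leaves pending, as the comment above notes, before the rest of the test runs.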
+ assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1))); + let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); // We check that the channel info nodes have doesn't change too early, even though we try @@ -5016,6 +5664,298 @@ mod tests { assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info); assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info); } + + #[test] + fn test_keysend_dup_hash_partial_mpp() { + // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as + // expected. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let logger = test_utils::TestLogger::new(); + + // First, send a partial MPP payment. + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]); + let mpp_id = MppId([42; 32]); + // Use the utility function send_payment_along_path to send the payment with MPP data which + // indicates there are more HTLCs coming. + let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. + nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); + + // Next, send a keysend payment with the same payment_hash and make sure it fails. 
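The failure being set up here comes from the receiver: the pending partial payment for this hash is invoice-style (`OnionPayload::Invoice`), while an incoming keysend HTLC for the same hash is spontaneous (`OnionPayload::Spontaneous`), and the two cannot be combined, so the keysend HTLC is failed back. Very roughly, and ignoring the amount and payment-secret checks the real code also performs, the receiver's decision looks like this simplified stand-in:

// Not the actual channelmanager code; a toy model of accepting an HTLC into a pending payment.
enum Payload { Invoice, Spontaneous }

fn accept_into_pending_payment(existing: Option<&Payload>, incoming: &Payload) -> bool {
    match (existing, incoming) {
        // Nothing pending under this payment hash yet: either kind may start a payment.
        (None, _) => true,
        // Another part of the same invoice-style (MPP) payment: accepted.
        (Some(Payload::Invoice), Payload::Invoice) => true,
        // A keysend colliding with a pending invoice payment (or the reverse) is failed back,
        // which is exactly what these duplicate-hash tests assert.
        _ => false,
    }
}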
+ nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = events.drain(..).next().unwrap(); + let payment_event = SendEvent::from_event(ev); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + check_added_monitors!(nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); + let events = nodes[0].node.get_and_clear_pending_events(); + expect_payment_failed!(nodes[0], events, our_payment_hash, true); + + // Send the second half of the original MPP payment. + nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None); + + // Claim the full MPP payment. Note that we can't use a test utility like + // claim_funds_along_route because the ordering of the messages causes the second half of the + // payment to be put in the holding cell, which confuses the test utilities. So we exchange the + // lightning messages manually. 
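Both halves of the MPP payment sit on the same channel, so claiming them produces two `update_fulfill_htlc`s but only the first can ride in the initial commitment update; the second waits in the channel's holding cell until the counterparty's `revoke_and_ack` arrives, which is why the exchange below is spelled out message by message. A toy model of that constraint (purely illustrative, not the real `Channel` state machine):

// One commitment round may be in flight at a time; updates queued while waiting for the
// counterparty's revoke_and_ack sit in the holding cell until it arrives.
#[derive(Default)]
struct ToyPeer {
    awaiting_raa: bool,
    holding_cell: Vec<&'static str>,
}

impl ToyPeer {
    fn send_fulfill(&mut self, htlc: &'static str) -> Option<&'static str> {
        if self.awaiting_raa {
            self.holding_cell.push(htlc);
            None // parked until the pending commitment round completes
        } else {
            self.awaiting_raa = true;
            Some(htlc) // goes out with a commitment_signed now
        }
    }
    fn on_revoke_and_ack(&mut self) -> Vec<&'static str> {
        self.awaiting_raa = false;
        // Freed updates go out with the next commitment_signed.
        core::mem::take(&mut self.holding_cell)
    }
}

This matches the trace below, where nodes[1]'s second fulfill only appears after it has handled nodes[0]'s first revoke_and_ack.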
+ assert!(nodes[1].node.claim_funds(payment_preimage)); + check_added_monitors!(nodes[1], 2); + let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed); + check_added_monitors!(nodes[0], 1); + let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa); + check_added_monitors!(nodes[1], 1); + let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs); + check_added_monitors!(nodes[1], 1); + let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); + check_added_monitors!(nodes[0], 1); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa); + let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed); + check_added_monitors!(nodes[1], 1); + let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa); + check_added_monitors!(nodes[0], 1); + + // Note that successful MPP payments will generate 1 event upon the first path's success. No + // further events will be generated for subsequence path successes. + let events = nodes[0].node.get_and_clear_pending_events(); + match events[0] { + Event::PaymentSent { payment_preimage: ref preimage } => { + assert_eq!(payment_preimage, *preimage); + }, + _ => panic!("Unexpected event"), + } + } + + #[test] + fn test_keysend_dup_payment_hash() { + + // (1): Test that a keysend payment with a duplicate payment hash to an existing pending + // outbound regular payment fails as expected. + // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment + // fails as expected. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let logger = test_utils::TestLogger::new(); + + // To start (1), send a regular payment but don't claim it. + let expected_route = [&nodes[1]]; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000); + + // Next, attempt a keysend payment and make sure it fails. 
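For a keysend (spontaneous) payment the sender picks the preimage and ships it inside the onion, so the payment hash is simply SHA256 of that preimage; reusing the preimage of the still-unclaimed regular payment above therefore guarantees the hash collision this case needs. The derivation, matching the `Sha256::hash(&preimage.0).into_inner()` pattern used later in these tests:

use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;

// Derive a keysend payment hash from a sender-chosen 32-byte preimage.
fn keysend_payment_hash(preimage: [u8; 32]) -> [u8; 32] {
    Sha256::hash(&preimage).into_inner()
}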
+ let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = events.drain(..).next().unwrap(); + let payment_event = SendEvent::from_event(ev); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + check_added_monitors!(nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); + let events = nodes[0].node.get_and_clear_pending_events(); + expect_payment_failed!(nodes[0], events, payment_hash, true); + + // Finally, claim the original payment. + claim_payment(&nodes[0], &expected_route, payment_preimage); + + // To start (2), send a keysend payment but don't claim it. + let payment_preimage = PaymentPreimage([42; 32]); + let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + let path = vec![&nodes[1]]; + pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage)); + + // Next, attempt a regular payment and make sure it fails. 
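This is the mirror image of case (1): the recipient is already holding a spontaneous HTLC for this hash, so the invoice-style HTLC gets failed back, and the trailing `true` in `expect_payment_failed!` appears to assert that the failure was a rejection by the destination rather than a transient routing problem. A hedged sketch of how a sender would typically react to such an event (event shape simplified):

struct PaymentFailure { rejected_by_dest: bool }

fn worth_retrying_over_another_path(failure: &PaymentFailure) -> bool {
    // A rejection by the destination, as in these duplicate-hash tests, is final;
    // only failures inside the route suggest trying a different path.
    !failure.rejected_by_dest
}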
+ let payment_secret = PaymentSecret([43; 32]); + nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = events.drain(..).next().unwrap(); + let payment_event = SendEvent::from_event(ev); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + check_added_monitors!(nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); + let events = nodes[0].node.get_and_clear_pending_events(); + expect_payment_failed!(nodes[0], events, payment_hash, true); + + // Finally, succeed the keysend payment. + claim_payment(&nodes[0], &expected_route, payment_preimage); + } + + #[test] + fn test_keysend_hash_mismatch() { + // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend + // preimage doesn't match the msg's payment hash. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let payer_pubkey = nodes[0].node.get_our_node_id(); + let payee_pubkey = nodes[1].node.get_our_node_id(); + nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() }); + nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() }); + + let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); + let network_graph = &nodes[0].net_graph_msg_handler.network_graph; + let first_hops = nodes[0].node.list_usable_channels(); + let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey, + Some(&first_hops.iter().collect::>()), &vec![], 10000, 40, + nodes[0].logger).unwrap(); + + let test_preimage = PaymentPreimage([42; 32]); + let mismatch_payment_hash = PaymentHash([43; 32]); + let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1); + } + + #[test] + fn test_keysend_msg_with_secret_err() { + // Test that we error as expected if 
we receive a keysend payment that includes a payment secret. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let payer_pubkey = nodes[0].node.get_our_node_id(); + let payee_pubkey = nodes[1].node.get_our_node_id(); + nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() }); + nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() }); + + let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); + let network_graph = &nodes[0].net_graph_msg_handler.network_graph; + let first_hops = nodes[0].node.list_usable_channels(); + let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey, + Some(&first_hops.iter().collect::>()), &vec![], 10000, 40, + nodes[0].logger).unwrap(); + + let test_preimage = PaymentPreimage([42; 32]); + let test_secret = PaymentSecret([43; 32]); + let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner()); + let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap(); + check_added_monitors!(nodes[0], 1); + + let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert_eq!(updates.update_add_htlcs.len(), 1); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1); + } + + #[test] + fn test_multi_hop_missing_secret() { + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let logger = test_utils::TestLogger::new(); + + // Marshall an MPP route. 
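Taken together, the two keysend tests above pin down the receiver-side validation: an onion carrying a keysend preimage must hash to the HTLC's payment hash, and a keysend preimage combined with MPP payment-secret data is not supported; each case is failed with the log line the tests assert on. Condensed into one hedged check (simplified types, not the real onion-processing code, and the order of the checks is a guess):

use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;

fn validate_keysend_onion(
    preimage: [u8; 32],
    payment_hash: [u8; 32],
    has_payment_secret: bool,
) -> Result<(), &'static str> {
    if has_payment_secret {
        // Log line asserted in test_keysend_msg_with_secret_err.
        return Err("We don't support MPP keysend payments");
    }
    if Sha256::hash(&preimage).into_inner() != payment_hash {
        // Log line asserted in test_keysend_hash_mismatch.
        return Err("Payment preimage didn't match payment hash");
    }
    Ok(())
}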
+ let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]); + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let path = route.paths[0].clone(); + route.paths.push(path); + route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0][0].short_channel_id = chan_1_id; + route.paths[0][1].short_channel_id = chan_3_id; + route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1][0].short_channel_id = chan_2_id; + route.paths[1][1].short_channel_id = chan_4_id; + + match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() { + PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { + assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) }, + _ => panic!("unexpected error") + } + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))] @@ -5027,18 +5967,18 @@ pub mod bench { use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage}; use ln::features::{InitFeatures, InvoiceFeatures}; use ln::functional_test_utils::*; - use ln::msgs::ChannelMessageHandler; + use ln::msgs::{ChannelMessageHandler, Init}; use routing::network_graph::NetworkGraph; use routing::router::get_route; use util::test_utils; use util::config::UserConfig; - use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; + use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::{Block, BlockHeader, Transaction, TxOut}; - use std::sync::{Arc, Mutex}; + use sync::{Arc, Mutex}; use test::Bencher; @@ -5065,7 +6005,7 @@ pub mod bench { let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash(); let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))}; - let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 }; + let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; let mut config: UserConfig = Default::default(); config.own_channel_config.minimum_depth = 1; @@ -5090,6 +6030,8 @@ pub mod bench { }); let node_b_holder = NodeHolder { node: &node_b }; + node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() }); + node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() }); node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap(); node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
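In test_multi_hop_missing_secret above, duplicating a path by hand yields a two-path route, and `send_payment` with no payment secret is expected to be refused up front with a `ParameterError(APIMisuseError)` whose message matches "Payment secret is required for multi-path payments". The guard amounts to something like the following sketch (not the actual send-path code):

// Up-front argument check for multi-path sends, sketched with simplified types.
fn check_mpp_secret(num_paths: usize, payment_secret: Option<[u8; 32]>) -> Result<(), &'static str> {
    if num_paths > 1 && payment_secret.is_none() {
        // Without a payment secret the recipient cannot safely correlate the parts,
        // so multi-path sends are rejected before any HTLC is added.
        return Err("Payment secret is required for multi-path payments");
    }
    Ok(())
}

The bench changes in this hunk are mechanical by comparison: the nodes now perform an explicit `peer_connected` handshake with known features before opening the channel, and the test fee estimator wraps its sat_per_kw value in a Mutex.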