X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=ac2297f86aeab74651b7f2f86ba2f903870be565;hb=2e5e40e0b21fbac862478dce4d81cf037f37060d;hp=19a8ad082f87a5191f78df8f744675b2fdb8edce;hpb=f60a65fec32e8525f27b8b388b609c03489e87eb;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 19a8ad08..ac2297f8 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -37,14 +37,14 @@ use bitcoin::secp256k1; use chain; use chain::{Confirm, Watch, BestBlock}; -use chain::chaininterface::{BroadcasterInterface, FeeEstimator}; +use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; use chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use ln::{PaymentHash, PaymentPreimage, PaymentSecret}; pub use ln::channel::CounterpartyForwardingInfo; -use ln::channel::{Channel, ChannelError, ChannelUpdateStatus}; +use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch}; use ln::features::{InitFeatures, NodeFeatures}; use routing::router::{Route, RouteHop}; use ln::msgs; @@ -57,20 +57,20 @@ use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEv use util::{byte_utils, events}; use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; use util::chacha20::{ChaCha20, ChaChaReader}; -use util::logger::Logger; +use util::logger::{Logger, Level}; use util::errors::APIError; +use io; use prelude::*; use core::{cmp, mem}; use core::cell::RefCell; -use std::io::{Cursor, Read}; +use io::{Cursor, Read}; use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; #[cfg(any(test, feature = "allow_wallclock_use"))] use std::time::Instant; use core::ops::Deref; -use bitcoin::hashes::hex::ToHex; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -157,14 +157,20 @@ pub(crate) struct HTLCPreviousHopData { outpoint: OutPoint, } -struct ClaimableHTLC { - prev_hop: HTLCPreviousHopData, - value: u64, +enum OnionPayload { /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a /// payment_secret which prevents path-probing attacks and can associate different HTLCs which /// are part of the same payment. - payment_data: msgs::FinalOnionHopData, + Invoice(msgs::FinalOnionHopData), + /// Contains the payer-provided preimage. + Spontaneous(PaymentPreimage), +} + +struct ClaimableHTLC { + prev_hop: HTLCPreviousHopData, cltv_expiry: u32, + value: u64, + onion_payload: OnionPayload, } /// Tracks the inbound corresponding to an outbound HTLC @@ -201,6 +207,14 @@ pub(super) enum HTLCFailReason { } } +/// Return value for claim_funds_from_hop enum ClaimFundsFromHop { PrevHopForceClosed, MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>), Success(u64), DuplicateClaim, } type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>); /// Error type returned across the channel_state mutex boundary.
When an Err is generated for a @@ -262,6 +276,10 @@ impl MsgHandleErrInternal { fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { + ChannelError::Warn(msg) => LightningError { + err: msg, + action: msgs::ErrorAction::IgnoreError, + }, ChannelError::Ignore(msg) => LightningError { err: msg, action: msgs::ErrorAction::IgnoreError, @@ -476,6 +494,8 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> + per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>, pending_events: Mutex<Vec<events::Event>>, @@ -803,6 +823,11 @@ macro_rules! handle_error { macro_rules! convert_chan_err { ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => { match $err { + ChannelError::Warn(msg) => { + //TODO: Once warning messages are merged, we should send a `warning` message to our + //peer here. + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) + }, ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, @@ -856,6 +881,18 @@ macro_rules! try_chan_entry { } } +macro_rules! remove_channel { + ($channel_state: expr, $entry: expr) => { + { + let channel = $entry.remove_entry().1; + if let Some(short_id) = channel.get_short_channel_id() { + $channel_state.short_to_id.remove(&short_id); + } + channel + } + } +} + macro_rules! handle_monitor_err { ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new()) @@ -1150,8 +1187,18 @@ impl ChannelMana return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } - let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?; + let channel = { + let per_peer_state = self.per_peer_state.read().unwrap(); + match per_peer_state.get(&their_network_key) { + Some(peer_state) => { + let peer_state = peer_state.lock().unwrap(); + let their_features = &peer_state.latest_features; + let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; + Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)? + }, + None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), + } + }; let res = channel.get_open_channel(self.genesis_hash.clone()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -1237,51 +1284,111 @@ impl ChannelMana self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } - /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs - /// will be accepted on the given channel, and after additional timeout/the closing of all - /// pending HTLCs, the channel will be closed on chain. - /// - /// May generate a SendShutdown message event on success, which should be relayed.
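A caller-side sketch of the `create_channel` change in the hunk above (the handle name and argument values are hypothetical; the signature is assumed from the parameters this diff threads through): with `per_peer_state` consulted for the peer's `latest_features`, opening a channel now fails up front if we are not connected to the counterparty.

match channel_manager.create_channel(their_network_key, 100_000, 0, 42, None) {
	Ok(()) => {}, // an open_channel message will be queued for the peer
	Err(APIError::ChannelUnavailable { err }) => eprintln!("not connected to peer: {}", err),
	Err(e) => eprintln!("failed to open channel: {:?}", e),
}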
- pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { + fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let (mut failed_htlcs, chan_option) = { + let counterparty_node_id; + let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; + let result: Result<(), _> = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; match channel_state.by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { - let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?; + counterparty_node_id = chan_entry.get().get_counterparty_node_id(); + let per_peer_state = self.per_peer_state.read().unwrap(); + let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) { + Some(peer_state) => { + let peer_state = peer_state.lock().unwrap(); + let their_features = &peer_state.latest_features; + chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? + }, + None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), + }; + failed_htlcs = htlcs; + + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update { + if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { + let (result, is_permanent) = + handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key()); + if is_permanent { + remove_channel!(channel_state, chan_entry); + break result; + } + } + } + channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: chan_entry.get().get_counterparty_node_id(), + node_id: counterparty_node_id, msg: shutdown_msg }); + if chan_entry.get().is_shutdown() { - if let Some(short_id) = chan_entry.get().get_short_channel_id() { - channel_state.short_to_id.remove(&short_id); + let channel = remove_channel!(channel_state, chan_entry); + if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) { + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: channel_update + }); } - (failed_htlcs, Some(chan_entry.remove_entry().1)) - } else { (failed_htlcs, None) } + } + break Ok(()); }, hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}) } }; + for htlc_source in failed_htlcs.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } - let chan_update = if let Some(chan) = chan_option { - self.get_channel_update_for_broadcast(&chan).ok() - } else { None }; - - if let Some(update) = chan_update { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } + let _ = handle_error!(self, result, counterparty_node_id); Ok(()) } + /// Begins the process of closing a channel.
After this call (plus some timeout), no new HTLCs + /// will be accepted on the given channel, and after additional timeout/the closing of all + /// pending HTLCs, the channel will be closed on chain. + /// + /// * If we are the channel initiator, we will pay between our [`Background`] and + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee + /// estimate. + /// * If our counterparty is the channel initiator, we will require a channel closing + /// transaction feerate of at least our [`Background`] feerate or the feerate which + /// would appear on a force-closure transaction, whichever is lower. We will allow our + /// counterparty to pay as much fee as they'd like, however. + /// + /// May generate a SendShutdown message event on success, which should be relayed. + /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { + self.close_channel_internal(channel_id, None) + } + + /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs + /// will be accepted on the given channel, and after additional timeout/the closing of all + /// pending HTLCs, the channel will be closed on chain. + /// + /// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated + /// the channel being closed or not: + /// * If we are the channel initiator, we will pay at least this feerate on the closing + /// transaction. The upper-bound is set by + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee + /// estimate (or `target_feerate_sat_per_1000_weight`, if it is greater). + /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure + /// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which + /// will appear on a force-closure transaction, whichever is lower). + /// + /// May generate a SendShutdown message event on success, which should be relayed. + /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> { + self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight)) + } + #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (monitor_update_option, mut failed_htlcs) = shutdown_res; @@ -1418,8 +1525,8 @@ impl ChannelMana let mut chacha = ChaCha20::new(&rho, &[0u8; 8]); let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) }; - let (next_hop_data, next_hop_hmac) = { - match msgs::OnionHopData::read(&mut chacha_stream) { + let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = { + match <msgs::OnionHopData as Readable>::read(&mut chacha_stream) { Err(err) => { let error_code = match err { msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte @@ -1459,10 +1566,11 @@ impl ChannelMana // OUR PAYMENT!
// final_expiry_too_soon - // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least - // HTLC_FAIL_BACK_BUFFER blocks to go. - // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward - // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational). + // We have to have some headroom to broadcast on chain if we have the preimage, so make sure + // we have at least HTLC_FAIL_BACK_BUFFER blocks to go. + // Also, ensure that, in the case of an unknown preimage for the received payment hash, our + // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a + // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); } @@ -1475,15 +1583,37 @@ impl ChannelMana return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); } - let payment_data = match next_hop_data.format { - msgs::OnionHopDataFormat::Legacy { .. } => None, + let routing = match next_hop_data.format { + msgs::OnionHopDataFormat::Legacy { .. } => return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]), msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]), - msgs::OnionHopDataFormat::FinalNode { payment_data, .. } => payment_data, - }; + msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => { + if payment_data.is_some() && keysend_preimage.is_some() { + return_err!("We don't support MPP keysend payments", 0x4000|22, &[0;0]); + } else if let Some(data) = payment_data { + PendingHTLCRouting::Receive { + payment_data: data, + incoming_cltv_expiry: msg.cltv_expiry, + } + } else if let Some(payment_preimage) = keysend_preimage { + // We need to check that the sender knows the keysend preimage before processing this + // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X + // could discover the final destination of X, by probing the adjacent nodes on the route + // with a keysend payment of identical payment hash to X and observing the processing + // time discrepancies due to a hash collision with X. + let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + if hashed_preimage != msg.payment_hash { + return_err!("Payment preimage didn't match payment hash", 0x4000|22, &[0;0]); + } - if payment_data.is_none() { - return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]); - } + PendingHTLCRouting::ReceiveKeysend { + payment_preimage, + incoming_cltv_expiry: msg.cltv_expiry, + } + } else { + return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]); + } + }, + }; // Note that we could obviously respond immediately with an update_fulfill_htlc // message, however that would leak that we are the recipient of this payment, so @@ -1491,10 +1621,7 @@ impl ChannelMana // delay) once they've sent us a commitment_signed!
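Condensed, the keysend acceptance rule above is just a hash check; a minimal sketch, assuming the `Sha256`, `PaymentHash` and `PaymentPreimage` types already imported in this file:

// Accept a spontaneous payment only if the HTLC's payment hash really is the SHA256 of
// the preimage the sender shipped inside the onion; otherwise an intermediate node could
// probe for the payment's destination as described in the comment above.
fn keysend_preimage_matches(payment_hash: &PaymentHash, preimage: &PaymentPreimage) -> bool {
	PaymentHash(Sha256::hash(&preimage.0).into_inner()) == *payment_hash
}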
PendingHTLCStatus::Forward(PendingHTLCInfo { - routing: PendingHTLCRouting::Receive { - payment_data: payment_data.unwrap(), - incoming_cltv_expiry: msg.cltv_expiry, - }, + routing, payment_hash: msg.payment_hash.clone(), incoming_shared_secret: shared_secret, amt_to_forward: next_hop_data.amt_to_forward, @@ -1693,7 +1820,7 @@ impl ChannelMana } // Only public for testing, this should otherwise never be called directly - pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> { + pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let prng_seed = self.keys_manager.get_secure_random_bytes(); let session_priv_bytes = self.keys_manager.get_secure_random_bytes(); @@ -1701,7 +1828,7 @@ impl ChannelMana let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?; - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?; + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?; if onion_utils::route_size_insane(&onion_payloads) { return Err(APIError::RouteError{err: "Route size too large considering onion data"}); } @@ -1810,6 +1937,10 @@ impl ChannelMana /// bit set (either as required or as available). If multiple paths are present in the Route, /// we assume the invoice had the basic_mpp feature set. pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> { + self.send_payment_internal(route, payment_hash, payment_secret, None) + } + + fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>) -> Result<(), PaymentSendFailure> { if route.paths.len() < 1 { return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); } @@ -1819,6 +1950,9 @@ impl ChannelMana // for now more than 10 paths likely carries too much one-path failure.
return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"})); } + if payment_secret.is_none() && route.paths.len() > 1 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()})); + } let mut total_value = 0; let our_node_id = self.get_our_node_id(); let mut path_errs = Vec::with_capacity(route.paths.len()); @@ -1843,7 +1977,7 @@ impl ChannelMana let cur_height = self.best_block.read().unwrap().height() + 1; let mut results = Vec::new(); for path in route.paths.iter() { - results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height)); + results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, &keysend_preimage)); } let mut has_ok = false; let mut has_err = false; @@ -1867,6 +2001,32 @@ impl ChannelMana } } + /// Send a spontaneous payment, which is a payment that does not require the recipient to have + /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify + /// the preimage, it must be a cryptographically secure random value that no intermediate node + /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will + /// never reach the recipient. + /// + /// See [`send_payment`] documentation for more details on the return value of this function. + /// + /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See + /// [`send_payment`] for more information about the risks of duplicate preimage usage. + /// + /// Note that `route` must have exactly one path. + /// + /// [`send_payment`]: Self::send_payment + pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<PaymentHash, PaymentSendFailure> { + let preimage = match payment_preimage { + Some(p) => p, + None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()), + }; + let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner()); + match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) { + Ok(()) => Ok(payment_hash), + Err(e) => Err(e) + } + } + /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>> @@ -2210,9 +2370,9 @@ impl ChannelMana // close channel and then send error message to peer. let counterparty_node_id = chan.get().get_counterparty_node_id(); let err: Result<(), _> = match e { - ChannelError::Ignore(_) => { + ChannelError::Ignore(_) | ChannelError::Warn(_) => { panic!("Stated return value requirements in send_commitment() were not met"); - }, + } ChannelError::Close(msg) => { log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); let (channel_id, mut channel) = chan.remove_entry(); @@ -2252,9 +2412,17 @@ impl ChannelMana for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { - routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry }, - incoming_shared_secret, payment_hash, amt_to_forward, .. }, + routing, incoming_shared_secret, payment_hash, amt_to_forward, ..
}, prev_funding_outpoint } => { + let (cltv_expiry, onion_payload) = match routing { + PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry } => + (incoming_cltv_expiry, OnionPayload::Invoice(payment_data)), + PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } => + (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage)), + _ => { + panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); + } + }; let claimable_htlc = ClaimableHTLC { prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, @@ -2263,8 +2431,8 @@ impl ChannelMana incoming_packet_shared_secret: incoming_shared_secret, }, value: amt_to_forward, - payment_data: payment_data.clone(), - cltv_expiry: incoming_cltv_expiry, + cltv_expiry, + onion_payload, }; macro_rules! fail_htlc { @@ -2293,10 +2461,38 @@ impl ChannelMana let mut payment_secrets = self.pending_inbound_payments.lock().unwrap(); match payment_secrets.entry(payment_hash) { hash_map::Entry::Vacant(_) => { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + match claimable_htlc.onion_payload { + OnionPayload::Invoice(_) => { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + }, + OnionPayload::Spontaneous(preimage) => { + match channel_state.claimable_htlcs.entry(payment_hash) { + hash_map::Entry::Vacant(e) => { + e.insert(vec![claimable_htlc]); + new_events.push(events::Event::PaymentReceived { + payment_hash, + amt: amt_to_forward, + purpose: events::PaymentPurpose::SpontaneousPayment(preimage), + }); + }, + hash_map::Entry::Occupied(_) => { + log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + } + } + } + } }, hash_map::Entry::Occupied(inbound_payment) => { + let payment_data = + if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload { + data.clone() + } else { + log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + continue + }; if inbound_payment.get().payment_secret != payment_data.payment_secret { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc); @@ -2308,15 +2504,27 @@ impl ChannelMana let mut total_value = 0; let htlcs = channel_state.claimable_htlcs.entry(payment_hash) .or_insert(Vec::new()); + if htlcs.len() == 1 { + if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + continue + } + } htlcs.push(claimable_htlc); for htlc in htlcs.iter() { total_value += htlc.value; - if htlc.payment_data.total_msat != payment_data.total_msat { - log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", - log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat); - total_value = msgs::MAX_VALUE_MSAT; + match &htlc.onion_payload { + 
OnionPayload::Invoice(htlc_payment_data) => { + if htlc_payment_data.total_msat != payment_data.total_msat { + log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", + log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat); + total_value = msgs::MAX_VALUE_MSAT; + } + if total_value >= msgs::MAX_VALUE_MSAT { break; } + }, + _ => unreachable!(), } - if total_value >= msgs::MAX_VALUE_MSAT { break; } } if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat { log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)", @@ -2327,10 +2535,12 @@ impl ChannelMana } else if total_value == payment_data.total_msat { new_events.push(events::Event::PaymentReceived { payment_hash, - payment_preimage: inbound_payment.get().payment_preimage, - payment_secret: payment_data.payment_secret, + purpose: events::PaymentPurpose::InvoicePayment { + payment_preimage: inbound_payment.get().payment_preimage, + payment_secret: payment_data.payment_secret, + user_payment_id: inbound_payment.get().user_payment_id, + }, amt: total_value, - user_payment_id: inbound_payment.get().user_payment_id, }); // Only ever generate at most one PaymentReceived // per registered payment_hash, even if it isn't @@ -2345,9 +2555,6 @@ impl ChannelMana }, }; }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); - }, HTLCForwardInfo::FailHTLC { .. } => { panic!("Got pending fail of our own HTLC"); } @@ -2401,48 +2608,160 @@ impl ChannelMana self.process_background_events(); } - /// If a peer is disconnected we mark any channels with that peer as 'disabled'. - /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate - /// to inform the network about the uselessness of these channels. 
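The staged disabling described in the removed doc above is deliberate: a brief disconnect should not cause a network-visible flap. A condensed restatement of the `ChannelUpdateStatus` transitions driven in the reworked `timer_tick_occurred` below (a hypothetical helper, not part of this patch; the returned bool means "broadcast a disabling/enabling ChannelUpdate now"):

fn next_status(status: ChannelUpdateStatus, is_live: bool) -> (ChannelUpdateStatus, bool) {
	match status {
		ChannelUpdateStatus::Enabled if !is_live => (ChannelUpdateStatus::DisabledStaged, false),
		ChannelUpdateStatus::DisabledStaged if !is_live => (ChannelUpdateStatus::Disabled, true),
		ChannelUpdateStatus::DisabledStaged if is_live => (ChannelUpdateStatus::Enabled, false),
		ChannelUpdateStatus::Disabled if is_live => (ChannelUpdateStatus::EnabledStaged, false),
		ChannelUpdateStatus::EnabledStaged if is_live => (ChannelUpdateStatus::Enabled, true),
		ChannelUpdateStatus::EnabledStaged if !is_live => (ChannelUpdateStatus::Disabled, false),
		other => (other, false),
	}
}

Two consecutive timer ticks with the peer disconnected are thus required before the disabling update is broadcast, and symmetrically for re-enabling.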
+ fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { + if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } + // If the feerate has decreased by less than half, don't bother + if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + return (true, NotifyOption::SkipPersist, Ok(())); + } + if !chan.is_live() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + return (true, NotifyOption::SkipPersist, Ok(())); + } + log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + + let mut retain_channel = true; + let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { + Ok(res) => Ok(res), + Err(e) => { + let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + if drop { retain_channel = false; } + Err(res) + } + }; + let ret_err = match res { + Ok(Some((update_fee, commitment_signed, monitor_update))) => { + if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id); + if drop { retain_channel = false; } + res + } else { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + Ok(()) + } + }, + Ok(None) => Ok(()), + Err(e) => Err(e), + }; + (retain_channel, NotifyOption::DoPersist, ret_err) + } + + #[cfg(fuzzing)] + /// In chanmon_consistency we want to sometimes do the channel fee updates done in + /// timer_tick_occurred, but we can't generate the disabled channel updates as it considers + /// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what + /// it wants to detect). Thus, we have a variant exposed here for its benefit.
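The gating rule at the top of `update_channel_fee` above reads as a standalone predicate: only send an update_fee if the estimate rose, or fell to half or less of the channel's current feerate. A sketch (feerates in sats per 1000 weight units, per the surrounding code):

fn feerate_change_worthwhile(current_feerate: u32, new_feerate: u32) -> bool {
	new_feerate > current_feerate || new_feerate * 2 <= current_feerate
}

assert!(!feerate_change_worthwhile(1000, 600)); // dropped by less than half: skip
assert!(feerate_change_worthwhile(1000, 500)); // dropped to half: send update_fee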
+ pub fn maybe_update_chan_fees(&self) { + PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { + let mut should_persist = NotifyOption::SkipPersist; + + let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + + let mut handle_errors = Vec::new(); + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let pending_msg_events = &mut channel_state.pending_msg_events; + let short_to_id = &mut channel_state.short_to_id; + channel_state.by_id.retain(|chan_id, chan| { + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push(err); + } + retain_channel + }); + } + + should_persist + }); + } + + /// Performs actions which should happen on startup and roughly once per minute thereafter. /// - /// This method handles all the details, and must be called roughly once per minute. + /// This currently includes: + /// * Increasing or decreasing the on-chain feerate estimates for our outbound channels, + /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more + /// than a minute, informing the network that they should no longer attempt to route over + /// the channel. /// - /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call. + /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate + /// estimate fetches. pub fn timer_tick_occurred(&self) { PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = NotifyOption::SkipPersist; if self.process_background_events() { should_persist = NotifyOption::DoPersist; } - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - for (_, chan) in channel_state.by_id.iter_mut() { - match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), - ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Disabled); - }, - ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Enabled); - }, - _ => {}, - } + let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + + let mut handle_errors = Vec::new(); + { + let mut 
channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let pending_msg_events = &mut channel_state.pending_msg_events; + let short_to_id = &mut channel_state.short_to_id; + channel_state.by_id.retain(|chan_id, chan| { + let counterparty_node_id = chan.get_counterparty_node_id(); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push((err, counterparty_node_id)); + } + if !retain_channel { return false; } + + if let Err(e) = chan.timer_check_closing_negotiation_progress() { + let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + handle_errors.push((Err(err), chan.get_counterparty_node_id())); + if needs_close { return false; } + } + + match chan.channel_update_status() { + ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), + ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), + ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), + ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), + ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Disabled); + }, + ChannelUpdateStatus::EnabledStaged if chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Enabled); + }, + _ => {}, + } + + true + }); } + for (err, counterparty_node_id) in handle_errors.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); + } should_persist }); } @@ -2688,16 +3007,22 @@ impl ChannelMana HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }); } else { match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) { - Err(Some(e)) => { - if let msgs::ErrorAction::IgnoreError = e.1.err.action { + ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => { + if let msgs::ErrorAction::IgnoreError = err.err.action { // We got a temporary failure updating monitor, but will claim the // HTLC when the monitor updating is restored (or on chain). 
- log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err); + log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); claimed_any_htlcs = true; - } else { errs.push(e); } + } else { errs.push((pk, err)); } }, - Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"), - Ok(()) => claimed_any_htlcs = true, + ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"), + ClaimFundsFromHop::DuplicateClaim => { + // While we should never get here in most cases, if we do, it likely + // indicates that the HTLC was timed out some time ago and is no longer + // available to be claimed. Thus, it does not make sense to set + // `claimed_any_htlcs`. + }, + ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true, } } } @@ -2715,62 +3040,68 @@ impl ChannelMana } else { false } } - fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> { + fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop { //TODO: Delay the claimed_funds relaying just like we do outbound relay! let channel_state = &mut **channel_state_lock; let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) { Some(chan_id) => chan_id.clone(), None => { - return Err(None) + return ClaimFundsFromHop::PrevHopForceClosed } }; if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { - let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { - Ok((msgs, monitor_option)) => { - if let Some(monitor_update) = monitor_option { + Ok(msgs_monitor_option) => { + if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - if was_frozen_for_monitor { - assert!(msgs.is_none()); - } else { - return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err()))); - } + log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug }, + "Failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, e); + return ClaimFundsFromHop::MonitorUpdateFail( + chan.get().get_counterparty_node_id(), + handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), + Some(htlc_value_msat) + ); } + if let Some((msg, commitment_signed)) = msgs { + log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", + log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: vec![msg], + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + } + }); + } + return
ClaimFundsFromHop::Success(htlc_value_msat); + } else { + return ClaimFundsFromHop::DuplicateClaim; + } - if let Some((msg, commitment_signed)) = msgs { - log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", - log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: vec![msg], - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - } - }); - } - return Ok(()) }, - Err(e) => { - // TODO: Do something with e? - // This should only occur if we are claiming an HTLC at the same time as the - // HTLC is being failed (eg because a block is being connected and this caused - // an HTLC to time out). This should, of course, only occur if the user is the - // one doing the claiming (as it being a part of a peer claim would imply we're - // about to lose funds) and only if the lock in claim_funds was dropped as a - // previous HTLC was failed (thus not for an MPP payment). - debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e); - return Err(None) + Err((e, monitor_update)) => { + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info }, + "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", + payment_preimage, e); + } + let counterparty_node_id = chan.get().get_counterparty_node_id(); + let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id); + if drop { + chan.remove_entry(); + } + return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None); }, } } else { unreachable!(); } } - fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) { + fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) { match source { HTLCSource::OutboundRoute { session_priv, .. } => { mem::drop(channel_state_lock); @@ -2789,29 +3120,51 @@ impl ChannelMana }, HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; - if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) { - Ok(()) => Ok(()), - Err(None) => { - let preimage_update = ChannelMonitorUpdate { - update_id: CLOSED_CHANNEL_UPDATE_ID, - updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: payment_preimage.clone(), - }], - }; - // We update the ChannelMonitor on the backward link, after - // receiving an offchain preimage event from the forward link (the - // event being update_fulfill_htlc).
- if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) { - log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", - payment_preimage, e); - } - Ok(()) - }, - Err(Some(res)) => Err(res), - } { - mem::drop(channel_state_lock); - let res: Result<(), _> = Err(err); - let _ = handle_error!(self, res, counterparty_node_id); + let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage); + let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true }; + let htlc_claim_value_msat = match res { + ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt, + ClaimFundsFromHop::Success(amt) => Some(amt), + _ => None, + }; + if let ClaimFundsFromHop::PrevHopForceClosed = res { + let preimage_update = ChannelMonitorUpdate { + update_id: CLOSED_CHANNEL_UPDATE_ID, + updates: vec![ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: payment_preimage.clone(), + }], + }; + // We update the ChannelMonitor on the backward link, after + // receiving an offchain preimage event from the forward link (the + // event being update_fulfill_htlc). + if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) { + log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, e); + } + // Note that we do *not* set `claimed_htlc` to false here. In fact, this + // totally could be a duplicate claim, but we have no way of knowing + // without interrogating the `ChannelMonitor` we've provided the above + // update to. Instead, we simply document in `PaymentForwarded` that this + // can happen. + } + mem::drop(channel_state_lock); + if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res { + let result: Result<(), _> = Err(err); + let _ = handle_error!(self, result, pk); + } + + if claimed_htlc { + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) + } else { None }; + + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::PaymentForwarded { + fee_earned_msat, + claim_from_onchain_tx: from_onchain, + }); + } } }, } @@ -2885,7 +3238,7 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); } - let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration) + let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration) .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?; let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -2911,7 +3264,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan); + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, &their_features), channel_state, chan); 
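// A worked example of the `PaymentForwarded` bookkeeping introduced in
// `claim_funds_internal` above, with hypothetical values:
let forwarded_htlc_value_msat: Option<u64> = Some(1_000_000); // sent onward on the outbound edge
let htlc_claim_value_msat: Option<u64> = Some(1_001_000); // claimed back on the inbound edge
let fee_earned_msat = htlc_claim_value_msat.map(|claimed| claimed - 1_000_000);
assert_eq!(fee_earned_msat, Some(1_000)); // 1000 msat earned in routing fees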
(chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) @@ -3048,7 +3401,8 @@ impl ChannelMana } fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { - let (mut dropped_htlcs, chan_option) = { + let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>; + let result: Result<(), _> = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -3057,25 +3411,36 @@ impl ChannelMana if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry); + + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } + + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); + dropped_htlcs = htlcs; + + // Update the monitor with the shutdown script if necessary. + if let Some(monitor_update) = monitor_update { + if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { + let (result, is_permanent) = + handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key()); + if is_permanent { + remove_channel!(channel_state, chan_entry); + break result; + } + } + } + if let Some(msg) = shutdown { channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { - node_id: counterparty_node_id.clone(), + node_id: *counterparty_node_id, msg, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } - if chan_entry.get().is_shutdown() { - if let Some(short_id) = chan_entry.get().get_short_channel_id() { - channel_state.short_to_id.remove(&short_id); - } - (dropped_htlcs, Some(chan_entry.remove_entry().1)) - } else { (dropped_htlcs, None) } + + break Ok(()); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -3083,14 +3448,8 @@ impl ChannelMana for htlc_source in dropped_htlcs.drain(..) 
{ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } - if let Some(chan) = chan_option { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - let mut channel_state = self.channel_state.lock().unwrap(); - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - } + + let _ = handle_error!(self, result, *counterparty_node_id); Ok(()) } @@ -3207,7 +3566,7 @@ impl ChannelMana fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { let mut channel_lock = self.channel_state.lock().unwrap(); - let htlc_source = { + let (htlc_source, forwarded_htlc_value) = { let channel_state = &mut *channel_lock; match channel_state.by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { @@ -3219,7 +3578,7 @@ impl ChannelMana hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } }; - self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone()); + self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false); Ok(()) } @@ -3265,8 +3624,8 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) = - match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) { + let (revoke_and_ack, commitment_signed, monitor_update) = + match chan.get_mut().commitment_signed(&msg, &self.logger) { Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); @@ -3278,7 +3637,6 @@ impl ChannelMana }; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); - //TODO: Rebroadcast closing_signed if present on monitor update restoration } channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), @@ -3297,12 +3655,6 @@ impl ChannelMana }, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } Ok(()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3358,12 +3710,12 @@ impl ChannelMana break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) = - break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan); + let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) = + break_chan_entry!(self, 
chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); htlcs_to_fail = htlcs_to_fail_in; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { if was_frozen_for_monitor { - assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); + assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); } else { if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) { @@ -3377,12 +3729,6 @@ impl ChannelMana updates, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap())) }, hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3527,73 +3873,17 @@ impl ChannelMana Ok(()) } - /// Begin Update fee process. Allowed only on an outbound channel. - /// If successful, will generate a UpdateHTLCs event, so you should probably poll - /// PeerManager::process_events afterwards. - /// Note: This API is likely to change! - /// (C-not exported) Cause its doc(hidden) anyway - #[doc(hidden)] - pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let counterparty_node_id; - let err: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - - match channel_state.by_id.entry(channel_id) { - hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}), - hash_map::Entry::Occupied(mut chan) => { - if !chan.get().is_outbound() { - return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()}); - } - if chan.get().is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.get().is_live() { - return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()}); - } - counterparty_node_id = chan.get().get_counterparty_node_id(); - if let Some((update_fee, commitment_signed, monitor_update)) = - break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan) - { - if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - unimplemented!(); - } - log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: 
Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, - }); - } - }, - } - return Ok(()) - }; - - match handle_error!(self, err, counterparty_node_id) { - Ok(_) => unreachable!(), - Err(e) => { Err(APIError::APIMisuseError { err: e.err })} - } - } - /// Process pending events from the `chain::Watch`, returning whether any events were processed. fn process_pending_monitor_events(&self) -> bool { let mut failed_channels = Vec::new(); - let pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); + let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); - for monitor_event in pending_monitor_events { + for monitor_event in pending_monitor_events.drain(..) { match monitor_event { MonitorEvent::HTLCEvent(htlc_update) => { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); - self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage); + self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); @@ -3682,7 +3972,7 @@ impl ChannelMana }); } - let has_update = has_monitor_update || !failed_htlcs.is_empty(); + let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); for (failures, channel_id) in failed_htlcs.drain(..) { self.fail_holding_cell_htlcs(failures, channel_id); } @@ -3694,6 +3984,63 @@ impl ChannelMana has_update } + /// Check whether any channels have finished removing all pending updates after a shutdown + /// exchange and can now send a closing_signed. + /// Returns whether any closing_signed messages were generated. + fn maybe_generate_initial_closing_signed(&self) -> bool { + let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); + let mut has_update = false; + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let by_id = &mut channel_state.by_id; + let short_to_id = &mut channel_state.short_to_id; + let pending_msg_events = &mut channel_state.pending_msg_events; + + by_id.retain(|channel_id, chan| { + match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { + Ok((msg_opt, tx_opt)) => { + if let Some(msg) = msg_opt { + has_update = true; + pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: chan.get_counterparty_node_id(), msg, + }); + } + if let Some(tx) = tx_opt { + // We're done with this channel. We got a closing_signed and sent back + // a closing_signed with a closing transaction to broadcast. 
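+ // Being "done" here means three things: drop the short-channel-id
+ // mapping, broadcast a final channel_update so the network stops
+ // routing through the channel, and hand the fully-signed closing
+ // transaction to the broadcaster.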
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+
+ log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ false
+ } else { true }
+ },
+ Err(e) => {
+ has_update = true;
+ let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ !close_channel
+ }
+ }
+ });
+ }
+
+ for (counterparty_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
+ }
+
+ has_update
+ }
+
 /// Handle a list of channel failures during a block_connected or block_disconnected call,
 /// pushing the channel monitor update (if any) to the background events queue and removing the
 /// Channel object.
@@ -3778,7 +4125,7 @@ impl ChannelMana
 /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
 /// method may return an Err if another payment with the same payment_hash is still pending.
 ///
- /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to
+ /// `user_payment_id` will be provided back in [`PaymentPurpose::InvoicePayment::user_payment_id`] events to
 /// allow tracking of which events correspond with which calls to this and
 /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply
 /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
@@ -3812,7 +4159,7 @@ impl ChannelMana
 ///
 /// [`create_inbound_payment`]: Self::create_inbound_payment
 /// [`PaymentReceived`]: events::Event::PaymentReceived
- /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id
+ /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id
 pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
 self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
 }
@@ -3847,6 +4194,9 @@ impl MessageSend
 if self.check_free_holding_cells() {
 result = NotifyOption::DoPersist;
 }
+ if self.maybe_generate_initial_closing_signed() {
+ result = NotifyOption::DoPersist;
+ }
 
 let mut pending_events = Vec::new();
 let mut channel_state = self.channel_state.lock().unwrap();
@@ -3888,7 +4238,7 @@ where
 result = NotifyOption::DoPersist;
 }
 
- let mut pending_events = std::mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
 if !pending_events.is_empty() {
 result = NotifyOption::DoPersist;
 }
@@ -4507,12 +4857,63 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 (6, incoming_packet_shared_secret, required)
 });
 
-impl_writeable_tlv_based!(ClaimableHTLC, {
- (0, prev_hop, required),
- (2, value, required),
- (4, payment_data, required),
- (6, cltv_expiry, required),
-});
+impl Writeable for ClaimableHTLC {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
+ let payment_data = match &self.onion_payload {
+ OnionPayload::Invoice(data) => Some(data.clone()),
+ _ => None,
+ };
+ let keysend_preimage = match self.onion_payload {
+ OnionPayload::Invoice(_) => None,
+ OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
+ };
+ write_tlv_fields!
+ (writer,
+ {
+ (0, self.prev_hop, required), (2, self.value, required),
+ (4, payment_data, option), (6, self.cltv_expiry, required),
+ (8, keysend_preimage, option),
+ });
+ Ok(())
+ }
+}
+
+impl Readable for ClaimableHTLC {
+ fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ let mut prev_hop = ::util::ser::OptionDeserWrapper(None);
+ let mut value = 0;
+ let mut payment_data: Option<msgs::FinalOnionHopData> = None;
+ let mut cltv_expiry = 0;
+ let mut keysend_preimage: Option<PaymentPreimage> = None;
+ read_tlv_fields!
+ (reader,
+ {
+ (0, prev_hop, required), (2, value, required),
+ (4, payment_data, option), (6, cltv_expiry, required),
+ (8, keysend_preimage, option)
+ });
+ let onion_payload = match keysend_preimage {
+ Some(p) => {
+ if payment_data.is_some() {
+ return Err(DecodeError::InvalidValue)
+ }
+ OnionPayload::Spontaneous(p)
+ },
+ None => {
+ if payment_data.is_none() {
+ return Err(DecodeError::InvalidValue)
+ }
+ OnionPayload::Invoice(payment_data.unwrap())
+ },
+ };
+ Ok(Self {
+ prev_hop: prev_hop.0.unwrap(),
+ value,
+ onion_payload,
+ cltv_expiry,
+ })
+ }
+}
 
 impl_writeable_tlv_based_enum!(HTLCSource,
 (0, OutboundRoute) => {
@@ -4561,7 +4962,7 @@ impl Writeable f
 F::Target: FeeEstimator,
 L::Target: Logger,
 {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 let _consistency_lock = self.total_consistency_lock.write().unwrap();
 
 write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
@@ -4757,7 +5158,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 F::Target: FeeEstimator,
 L::Target: Logger,
 {
- fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
 let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
 Ok((blockhash, Arc::new(chan_manager)))
 }
}
@@ -4771,7 +5172,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 F::Target: FeeEstimator,
 L::Target: Logger,
 {
- fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
 let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
 
 let genesis_hash: BlockHash = Readable::read(reader)?;
@@ -4807,6 +5208,10 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
 channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
 // But if the channel is behind of the monitor, close the channel:
+ log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
+ log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
+ log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
 let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
 failed_htlcs.append(&mut new_failed_htlcs);
 monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
@@ -4959,18 +5364,28 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: 
Deref, F: Deref, L: Deref> #[cfg(test)] mod tests { - use ln::channelmanager::PersistenceNotifier; - use sync::Arc; - use core::sync::atomic::{AtomicBool, Ordering}; - use std::thread; + use bitcoin::hashes::Hash; + use bitcoin::hashes::sha256::Hash as Sha256; use core::time::Duration; + use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; + use ln::channelmanager::PaymentSendFailure; + use ln::features::{InitFeatures, InvoiceFeatures}; use ln::functional_test_utils::*; - use ln::features::InitFeatures; + use ln::msgs; use ln::msgs::ChannelMessageHandler; + use routing::router::{get_keysend_route, get_route}; + use util::errors::APIError; + use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; + use util::test_utils; #[cfg(feature = "std")] #[test] fn test_wait_timeout() { + use ln::channelmanager::PersistenceNotifier; + use sync::Arc; + use core::sync::atomic::{AtomicBool, Ordering}; + use std::thread; + let persistence_notifier = Arc::new(PersistenceNotifier::new()); let thread_notifier = Arc::clone(&persistence_notifier); @@ -5020,6 +5435,12 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + // All nodes start with a persistable update pending as `create_network` connects each node + // with all other nodes to make most tests simpler. + assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1))); + let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); // We check that the channel info nodes have doesn't change too early, even though we try @@ -5082,6 +5503,298 @@ mod tests { assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info); assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info); } + + #[test] + fn test_keysend_dup_hash_partial_mpp() { + // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as + // expected. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let logger = test_utils::TestLogger::new(); + + // First, send a partial MPP payment. + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]); + // Use the utility function send_payment_along_path to send the payment with MPP data which + // indicates there are more HTLCs coming. + let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. 
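+ // The path itself carries 100_000 msat but total_msat is given as 200_000
+ // below, so the recipient treats this HTLC as the first half of an MPP
+ // payment and holds it rather than generating a PaymentReceived event.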
+ nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None); + + // Next, send a keysend payment with the same payment_hash and make sure it fails. + nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = events.drain(..).next().unwrap(); + let payment_event = SendEvent::from_event(ev); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + check_added_monitors!(nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); + expect_payment_failed!(nodes[0], our_payment_hash, true); + + // Send the second half of the original MPP payment. + nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None); + + // Claim the full MPP payment. Note that we can't use a test utility like + // claim_funds_along_route because the ordering of the messages causes the second half of the + // payment to be put in the holding cell, which confuses the test utilities. So we exchange the + // lightning messages manually. 
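+ // Rough shape of the exchange below: B sends update_fulfill_htlc plus
+ // commitment_signed for the first HTLC, A replies with revoke_and_ack and
+ // its own commitment_signed, and B's second update_fulfill_htlc is only
+ // released from the holding cell once A's first revoke_and_ack arrives.
+ // From there the usual commitment_signed/revoke_and_ack handshakes run
+ // until both HTLCs are irrevocably resolved.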
+ assert!(nodes[1].node.claim_funds(payment_preimage)); + check_added_monitors!(nodes[1], 2); + let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed); + check_added_monitors!(nodes[0], 1); + let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa); + check_added_monitors!(nodes[1], 1); + let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs); + check_added_monitors!(nodes[1], 1); + let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed); + check_added_monitors!(nodes[0], 1); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa); + let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed); + check_added_monitors!(nodes[1], 1); + let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa); + check_added_monitors!(nodes[0], 1); + + // There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here. + let events = nodes[0].node.get_and_clear_pending_events(); + match events[0] { + Event::PaymentSent { payment_preimage: ref preimage } => { + assert_eq!(payment_preimage, *preimage); + }, + _ => panic!("Unexpected event"), + } + match events[1] { + Event::PaymentSent { payment_preimage: ref preimage } => { + assert_eq!(payment_preimage, *preimage); + }, + _ => panic!("Unexpected event"), + } + } + + #[test] + fn test_keysend_dup_payment_hash() { + // (1): Test that a keysend payment with a duplicate payment hash to an existing pending + // outbound regular payment fails as expected. + // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment + // fails as expected. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let logger = test_utils::TestLogger::new(); + + // To start (1), send a regular payment but don't claim it. 
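+ // Because the first payment's HTLC is still outstanding at the recipient,
+ // its payment_hash is occupied; the keysend below reuses that hash and is
+ // expected to be failed back rather than accepted alongside it.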
+ let expected_route = [&nodes[1]]; + let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000); + + // Next, attempt a keysend payment and make sure it fails. + let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = events.drain(..).next().unwrap(); + let payment_event = SendEvent::from_event(ev); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + check_added_monitors!(nodes[1], 0); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fulfill_htlcs.is_empty()); + assert_eq!(updates.update_fail_htlcs.len(), 1); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); + expect_payment_failed!(nodes[0], payment_hash, true); + + // Finally, claim the original payment. + claim_payment(&nodes[0], &expected_route, payment_preimage); + + // To start (2), send a keysend payment but don't claim it. + let payment_preimage = PaymentPreimage([42; 32]); + let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + let path = vec![&nodes[1]]; + pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage)); + + // Next, attempt a regular payment and make sure it fails. 
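+ // The payment_hash now collides with the pending keysend above, so this
+ // invoice-style HTLC should likewise come back via update_fail_htlc,
+ // mirroring case (1).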
+ let payment_secret = PaymentSecret([43; 32]);
+ nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = events.drain(..).next().unwrap();
+ let payment_event = SendEvent::from_event(ev);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+ expect_payment_failed!(nodes[0], payment_hash, true);
+
+ // Finally, succeed the keysend payment.
+ claim_payment(&nodes[0], &expected_route, payment_preimage);
+ }
+
+ #[test]
+ fn test_keysend_hash_mismatch() {
+ // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
+ // preimage doesn't match the msg's payment hash.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let payer_pubkey = nodes[0].node.get_our_node_id();
+ let payee_pubkey = nodes[1].node.get_our_node_id();
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+ let first_hops = nodes[0].node.list_usable_channels();
+ let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+ Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+ nodes[0].logger).unwrap();
+
+ let test_preimage = PaymentPreimage([42; 32]);
+ let mismatch_payment_hash = PaymentHash([43; 32]);
+ let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+ }
+
+ #[test]
+ fn test_keysend_msg_with_secret_err() {
+ // Test that we error as expected if we receive a keysend payment that includes a payment secret.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let payer_pubkey = nodes[0].node.get_our_node_id();
+ let payee_pubkey = nodes[1].node.get_our_node_id();
+ nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+ nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+ let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+ let first_hops = nodes[0].node.list_usable_channels();
+ let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+ Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+ nodes[0].logger).unwrap();
+
+ let test_preimage = PaymentPreimage([42; 32]);
+ let test_secret = PaymentSecret([43; 32]);
+ let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
+ let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+ }
+
+ #[test]
+ fn test_multi_hop_missing_secret() {
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let logger = test_utils::TestLogger::new();
+
+ // Marshall an MPP route.
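+ // Duplicate the single path returned by get_route, then pin path 0 to
+ // chan_1 -> chan_3 and path 1 to chan_2 -> chan_4 so the two halves
+ // traverse different intermediate nodes.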
+ let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]); + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let path = route.paths[0].clone(); + route.paths.push(path); + route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0][0].short_channel_id = chan_1_id; + route.paths[0][1].short_channel_id = chan_3_id; + route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1][0].short_channel_id = chan_2_id; + route.paths[1][1].short_channel_id = chan_4_id; + + match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() { + PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { + assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) }, + _ => panic!("unexpected error") + } + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))] @@ -5093,12 +5806,12 @@ pub mod bench { use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage}; use ln::features::{InitFeatures, InvoiceFeatures}; use ln::functional_test_utils::*; - use ln::msgs::ChannelMessageHandler; + use ln::msgs::{ChannelMessageHandler, Init}; use routing::network_graph::NetworkGraph; use routing::router::get_route; use util::test_utils; use util::config::UserConfig; - use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; + use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose}; use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; @@ -5156,6 +5869,8 @@ pub mod bench { }); let node_b_holder = NodeHolder { node: &node_b }; + node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known() }); + node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known() }); node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap(); node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));