X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=298f88ce0cc018162cf5f2a3201512641b1cd8fd;hb=2144166bc64b6350d22b5bbf4e25477a666ad402;hp=afe3f91dd2def716b2af972eb992124f57420bcf;hpb=f1c07b5573a75b00f178c4d8c2a637e29d5acaa6;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index afe3f91d..298f88ce 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -37,13 +37,12 @@ use bitcoin::secp256k1; use chain; use chain::{Confirm, Watch, BestBlock}; -use chain::chaininterface::{BroadcasterInterface, FeeEstimator}; +use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; use chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use ln::{PaymentHash, PaymentPreimage, PaymentSecret}; -pub use ln::channel::CounterpartyForwardingInfo; use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch}; use ln::features::{InitFeatures, NodeFeatures}; use routing::router::{Route, RouteHop}; @@ -53,9 +52,9 @@ use ln::onion_utils; use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField}; use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner}; use util::config::UserConfig; -use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider}; +use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use util::{byte_utils, events}; -use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; +use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer}; use util::chacha20::{ChaCha20, ChaChaReader}; use util::logger::{Logger, Level}; use util::errors::APIError; @@ -71,7 +70,6 @@ use core::time::Duration; #[cfg(any(test, feature = "allow_wallclock_use"))] use std::time::Instant; use core::ops::Deref; -use bitcoin::hashes::hex::ToHex; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -174,6 +172,22 @@ struct ClaimableHTLC { onion_payload: OnionPayload, } +/// A payment identifier used to uniquely identify a payment to LDK. 
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct PaymentId(pub [u8; 32]); + +impl Writeable for PaymentId { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + self.0.write(w) + } +} + +impl Readable for PaymentId { + fn read(r: &mut R) -> Result { + let buf: [u8; 32] = Readable::read(r)?; + Ok(PaymentId(buf)) + } +} /// Tracks the inbound corresponding to an outbound HTLC #[derive(Clone, PartialEq)] pub(crate) enum HTLCSource { @@ -184,6 +198,7 @@ pub(crate) enum HTLCSource { /// Technically we can recalculate this from the route, but we cache it here to avoid /// doing a double-pass on route when we get a failure back first_hop_htlc_msat: u64, + payment_id: PaymentId, }, } #[cfg(test)] @@ -193,6 +208,7 @@ impl HTLCSource { path: Vec::new(), session_priv: SecretKey::from_slice(&[1; 32]).unwrap(), first_hop_htlc_msat: 0, + payment_id: PaymentId([2; 32]), } } } @@ -226,6 +242,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource struct MsgHandleErrInternal { err: msgs::LightningError, + chan_id: Option<[u8; 32]>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { @@ -241,6 +258,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: None, shutdown_finish: None, } } @@ -251,12 +269,13 @@ impl MsgHandleErrInternal { err, action: msgs::ErrorAction::IgnoreError, }, + chan_id: None, shutdown_finish: None, } } #[inline] fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, shutdown_finish: None } + Self { err, chan_id: None, shutdown_finish: None } } #[inline] fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option) -> Self { @@ -270,6 +289,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: Some(channel_id), shutdown_finish: Some((shutdown_res, channel_update)), } } @@ -277,6 +297,10 @@ impl MsgHandleErrInternal { fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self { Self { err: match err { + ChannelError::Warn(msg) => LightningError { + err: msg, + action: msgs::ErrorAction::IgnoreError, + }, ChannelError::Ignore(msg) => LightningError { err: msg, action: msgs::ErrorAction::IgnoreError, @@ -300,6 +324,7 @@ impl MsgHandleErrInternal { }, }, }, + chan_id: None, shutdown_finish: None, } } @@ -375,6 +400,65 @@ struct PendingInboundPayment { min_value_msat: Option, } +/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102 +/// and later, also stores information for retrying the payment. +pub(crate) enum PendingOutboundPayment { + Legacy { + session_privs: HashSet<[u8; 32]>, + }, + Retryable { + session_privs: HashSet<[u8; 32]>, + payment_hash: PaymentHash, + payment_secret: Option, + pending_amt_msat: u64, + /// The total payment amount across all paths, used to verify that a retry is not overpaying. + total_msat: u64, + /// Our best known block height at the time this payment was initiated. + starting_block_height: u32, + }, +} + +impl PendingOutboundPayment { + fn remove(&mut self, session_priv: &[u8; 32], part_amt_msat: u64) -> bool { + let remove_res = match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + session_privs.remove(session_priv) + } + }; + if remove_res { + if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. 
} = self { + *pending_amt_msat -= part_amt_msat; + } + } + remove_res + } + + fn insert(&mut self, session_priv: [u8; 32], part_amt_msat: u64) -> bool { + let insert_res = match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + session_privs.insert(session_priv) + } + }; + if insert_res { + if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self { + *pending_amt_msat += part_amt_msat; + } + } + insert_res + } + + fn remaining_parts(&self) -> usize { + match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + session_privs.len() + } + } + } +} + /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g. /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static /// lifetimes). Other times you can afford a reference, which is more efficient, in which case @@ -461,17 +545,19 @@ pub struct ChannelManager>, - /// The session_priv bytes of outbound payments which are pending resolution. + /// The session_priv bytes and retry metadata of outbound payments which are pending resolution. /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors /// (if the channel has been force-closed), however we track them here to prevent duplicative - /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative + /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative /// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice. /// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s) /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents /// after reloading from disk while replaying blocks against ChannelMonitors. /// + /// See `PendingOutboundPayment` documentation for more info. + /// /// Locked *after* channel_state. - pending_outbound_payments: Mutex>, + pending_outbound_payments: Mutex>, our_network_key: SecretKey, our_network_pubkey: PublicKey, @@ -623,6 +709,19 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRA #[allow(dead_code)] const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER; +/// Information needed for constructing an invoice route hint for this channel. +#[derive(Clone, Debug, PartialEq)] +pub struct CounterpartyForwardingInfo { + /// Base routing fee in millisatoshis. + pub fee_base_msat: u32, + /// Amount in millionths of a satoshi the channel will charge per transferred satoshi. + pub fee_proportional_millionths: u32, + /// The minimum difference in cltv_expiry between an ingoing HTLC and its outgoing counterpart, + /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s + /// `cltv_expiry_delta` for more details. + pub cltv_expiry_delta: u16, +} + /// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`] /// to better separate parameters. #[derive(Clone, Debug, PartialEq)] @@ -777,12 +876,13 @@ macro_rules! 
handle_error { ($self: ident, $internal: expr, $counterparty_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, shutdown_finish }) => { + Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => { #[cfg(debug_assertions)] { // In testing, ensure there are no deadlocks where the lock is already held upon // entering the macro. assert!($self.channel_state.try_lock().is_ok()); + assert!($self.pending_events.try_lock().is_ok()); } let mut msg_events = Vec::with_capacity(2); @@ -794,6 +894,9 @@ macro_rules! handle_error { msg: update }); } + if let Some(channel_id) = chan_id { + $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } }); + } } log_error!($self.logger, "{}", err.err); @@ -820,6 +923,11 @@ macro_rules! handle_error { macro_rules! convert_chan_err { ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => { match $err { + ChannelError::Warn(msg) => { + //TODO: Once warning messages are merged, we should send a `warning` message to our + //peer here. + (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) + }, ChannelError::Ignore(msg) => { (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone())) }, @@ -1130,7 +1238,7 @@ impl ChannelMana pending_msg_events: Vec::new(), }), pending_inbound_payments: Mutex::new(HashMap::new()), - pending_outbound_payments: Mutex::new(HashSet::new()), + pending_outbound_payments: Mutex::new(HashMap::new()), our_network_key: keys_manager.get_node_secret(), our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), @@ -1179,15 +1287,18 @@ impl ChannelMana return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } - let their_features = { + let channel = { let per_peer_state = self.per_peer_state.read().unwrap(); match per_peer_state.get(&their_network_key) { - Some(peer_state) => peer_state.lock().unwrap().latest_features.clone(), + Some(peer_state) => { + let peer_state = peer_state.lock().unwrap(); + let their_features = &peer_state.latest_features; + let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; + Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)? + }, None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), } }; - let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)?; let res = channel.get_open_channel(self.genesis_hash.clone()); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -1273,12 +1384,19 @@ impl ChannelMana self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } - /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs - /// will be accepted on the given channel, and after additional timeout/the closing of all - /// pending HTLCs, the channel will be closed on chain. 
- /// - /// May generate a SendShutdown message event on success, which should be relayed. - pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { + /// Helper function that issues the channel close events + fn issue_channel_close_events(&self, channel: &Channel, closure_reason: ClosureReason) { + let mut pending_events_lock = self.pending_events.lock().unwrap(); + match channel.unbroadcasted_funding() { + Some(transaction) => { + pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction }) + }, + None => {}, + } + pending_events_lock.push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: closure_reason }); + } + + fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let counterparty_node_id; @@ -1289,14 +1407,15 @@ impl ChannelMana match channel_state.by_id.entry(channel_id.clone()) { hash_map::Entry::Occupied(mut chan_entry) => { counterparty_node_id = chan_entry.get().get_counterparty_node_id(); - let their_features = { - let per_peer_state = self.per_peer_state.read().unwrap(); - match per_peer_state.get(&counterparty_node_id) { - Some(peer_state) => peer_state.lock().unwrap().latest_features.clone(), - None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), - } + let per_peer_state = self.per_peer_state.read().unwrap(); + let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) { + Some(peer_state) => { + let peer_state = peer_state.lock().unwrap(); + let their_features = &peer_state.latest_features; + chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)? + }, + None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }), }; - let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &their_features)?; failed_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. @@ -1323,6 +1442,7 @@ impl ChannelMana msg: channel_update }); } + self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); } break Ok(()); }, @@ -1338,6 +1458,50 @@ impl ChannelMana Ok(()) } + /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs + /// will be accepted on the given channel, and after additional timeout/the closing of all + /// pending HTLCs, the channel will be closed on chain. + /// + /// * If we are the channel initiator, we will pay between our [`Background`] and + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee + /// estimate. + /// * If our counterparty is the channel initiator, we will require a channel closing + /// transaction feerate of at least our [`Background`] feerate or the feerate which + /// would appear on a force-closure transaction, whichever is lower. We will allow our + /// counterparty to pay as much fee as they'd like, however. + /// + /// May generate a SendShutdown message event on success, which should be relayed. 
+ /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { + self.close_channel_internal(channel_id, None) + } + + /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs + /// will be accepted on the given channel, and after additional timeout/the closing of all + /// pending HTLCs, the channel will be closed on chain. + /// + /// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated + /// the channel being closed or not: + /// * If we are the channel initiator, we will pay at least this feerate on the closing + /// transaction. The upper-bound is set by + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee + /// estimate (or `target_feerate_sat_per_1000_weight`, if it is greater). + /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure + /// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which + /// will appear on a force-closure transaction, whichever is lower). + /// + /// May generate a SendShutdown message event on success, which should be relayed. + /// + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis + /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background + /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> { + self.close_channel_internal(channel_id, Some(target_feerate_sats_per_1000_weight)) + } + #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { let (monitor_update_option, mut failed_htlcs) = shutdown_res; @@ -1354,7 +1518,9 @@ impl ChannelMana } } - fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result { + /// `peer_node_id` should be set when we receive a message from a peer, but not set when the + /// user closes, which will be re-exposed as the `ChannelClosed` reason. + fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>, peer_msg: Option<&String>) -> Result { let mut chan = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -1367,6 +1533,13 @@ impl ChannelMana if let Some(short_id) = chan.get().get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } + if peer_node_id.is_some() { + if let Some(peer_msg) = peer_msg { + self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() }); + } + } else { + self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); + } chan.remove_entry().1 } else { return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()}); @@ -1388,7 +1561,7 @@ impl ChannelMana /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager. 
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - match self.force_close_channel_with_peer(channel_id, None) { + match self.force_close_channel_with_peer(channel_id, None, None) { Ok(counterparty_node_id) => { self.channel_state.lock().unwrap().pending_msg_events.push( events::MessageSendEvent::HandleError { @@ -1474,8 +1647,8 @@ impl ChannelMana let mut chacha = ChaCha20::new(&rho, &[0u8; 8]); let mut chacha_stream = ChaChaReader { chacha: &mut chacha, read: Cursor::new(&msg.onion_routing_packet.hop_data[..]) }; - let (next_hop_data, next_hop_hmac) = { - match msgs::OnionHopData::read(&mut chacha_stream) { + let (next_hop_data, next_hop_hmac): (msgs::OnionHopData, _) = { + match ::read(&mut chacha_stream) { Err(err) => { let error_code = match err { msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte @@ -1769,7 +1942,7 @@ impl ChannelMana } // Only public for testing, this should otherwise never be called direcly - pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, keysend_preimage: &Option) -> Result<(), APIError> { + pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let prng_seed = self.keys_manager.get_secure_random_bytes(); let session_priv_bytes = self.keys_manager.get_secure_random_bytes(); @@ -1784,7 +1957,6 @@ impl ChannelMana let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes)); let err: Result<(), _> = loop { let mut channel_lock = self.channel_state.lock().unwrap(); @@ -1802,11 +1974,27 @@ impl ChannelMana if !chan.get().is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()}); } - break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - path: path.clone(), - session_priv: session_priv.clone(), - first_hop_htlc_msat: htlc_msat, - }, onion_packet, &self.logger), channel_state, chan) + let send_res = break_chan_entry!(self, chan.get_mut().send_htlc_and_commit( + htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + path: path.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + payment_id, + }, onion_packet, &self.logger), + channel_state, chan); + + let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); + let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable { + session_privs: HashSet::new(), + pending_amt_msat: 0, + payment_hash: *payment_hash, + payment_secret: *payment_secret, + starting_block_height: self.best_block.read().unwrap().height(), + total_msat: total_value, + }); + assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat)); + + send_res } { Some((update_add, 
commitment_signed, monitor_update)) => { if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { @@ -1885,11 +2073,11 @@ impl ChannelMana /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature /// bit set (either as required or as available). If multiple paths are present in the Route, /// we assume the invoice had the basic_mpp feature set. - pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result<(), PaymentSendFailure> { - self.send_payment_internal(route, payment_hash, payment_secret, None) + pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result { + self.send_payment_internal(route, payment_hash, payment_secret, None, None, None) } - fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option) -> Result<(), PaymentSendFailure> { + fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: Option, recv_value_msat: Option) -> Result { if route.paths.len() < 1 { return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); } @@ -1899,9 +2087,13 @@ impl ChannelMana // for now more than 10 paths likely carries too much one-path failure. return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"})); } + if payment_secret.is_none() && route.paths.len() > 1 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()})); + } let mut total_value = 0; let our_node_id = self.get_our_node_id(); let mut path_errs = Vec::with_capacity(route.paths.len()); + let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) }; 'path_check: for path in route.paths.iter() { if path.len() < 1 || path.len() > 20 { path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"})); @@ -1919,11 +2111,15 @@ impl ChannelMana if path_errs.iter().any(|e| e.is_err()) { return Err(PaymentSendFailure::PathParameterError(path_errs)); } + if let Some(amt_msat) = recv_value_msat { + debug_assert!(amt_msat >= total_value); + total_value = amt_msat; + } let cur_height = self.best_block.read().unwrap().height() + 1; let mut results = Vec::new(); for path in route.paths.iter() { - results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, &keysend_preimage)); + results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage)); } let mut has_ok = false; let mut has_err = false; @@ -1943,8 +2139,56 @@ impl ChannelMana } else if has_err { Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect())) } else { - Ok(()) + Ok(payment_id) + } + } + + /// Retries a payment along the given [`Route`]. + /// + /// Errors returned are a superset of those returned from [`send_payment`], so see + /// [`send_payment`] documentation for more details on errors. This method will also error if the + /// retry amount puts the payment more than 10% over the payment's total amount, or if the payment + /// for the given `payment_id` cannot be found (likely due to timeout or success). 
+ /// + /// [`send_payment`]: [`ChannelManager::send_payment`] + pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { + const RETRY_OVERFLOW_PERCENTAGE: u64 = 10; + for path in route.paths.iter() { + if path.len() == 0 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "length-0 path in route".to_string() + })) + } } + + let (total_msat, payment_hash, payment_secret) = { + let outbounds = self.pending_outbound_payments.lock().unwrap(); + if let Some(payment) = outbounds.get(&payment_id) { + match payment { + PendingOutboundPayment::Retryable { + total_msat, payment_hash, payment_secret, pending_amt_msat, .. + } => { + let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum(); + if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string() + })) + } + (*total_msat, *payment_hash, *payment_secret) + }, + PendingOutboundPayment::Legacy { .. } => { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string() + })) + } + } + } else { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)), + })) + } + }; + return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ()) } /// Send a spontaneous payment, which is a payment that does not require the recipient to have @@ -1953,18 +2197,22 @@ impl ChannelMana /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will /// never reach the recipient. /// + /// See [`send_payment`] documentation for more details on the return value of this function. + /// /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See /// [`send_payment`] for more information about the risks of duplicate preimage usage. /// + /// Note that `route` must have exactly one path. + /// /// [`send_payment`]: Self::send_payment - pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option) -> Result { + pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let preimage = match payment_preimage { Some(p) => p, None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()), }; let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner()); - match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) { - Ok(()) => Ok(payment_hash), + match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) { + Ok(payment_id) => Ok((payment_hash, payment_id)), Err(e) => Err(e) } } @@ -2312,15 +2560,16 @@ impl ChannelMana // close channel and then send error message to peer. 
let counterparty_node_id = chan.get().get_counterparty_node_id(); let err: Result<(), _> = match e { - ChannelError::Ignore(_) => { + ChannelError::Ignore(_) | ChannelError::Warn(_) => { panic!("Stated return value requirements in send_commitment() were not met"); - }, + } ChannelError::Close(msg) => { log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg); let (channel_id, mut channel) = chan.remove_entry(); if let Some(short_id) = channel.get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } + // ChannelClosed event is generated by handle_error for us. Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) }, ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } @@ -2550,48 +2799,160 @@ impl ChannelMana self.process_background_events(); } - /// If a peer is disconnected we mark any channels with that peer as 'disabled'. - /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate - /// to inform the network about the uselessness of these channels. + fn update_channel_fee(&self, short_to_id: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { + if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } + // If the feerate has decreased by less than half, don't bother + if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + return (true, NotifyOption::SkipPersist, Ok(())); + } + if !chan.is_live() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + return (true, NotifyOption::SkipPersist, Ok(())); + } + log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + + let mut retain_channel = true; + let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { + Ok(res) => Ok(res), + Err(e) => { + let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + if drop { retain_channel = false; } + Err(res) + } + }; + let ret_err = match res { + Ok(Some((update_fee, commitment_signed, monitor_update))) => { + if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id); + if drop { retain_channel = false; } + res + } else { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + Ok(()) + } + }, + Ok(None) => Ok(()), + Err(e) => Err(e), + }; + (retain_channel, NotifyOption::DoPersist, ret_err) + } + + 
#[cfg(fuzzing)] + /// In chanmon_consistency we want to sometimes do the channel fee updates done in + /// timer_tick_occurred, but we can't generate the disabled channel updates as it considers + /// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what + /// it wants to detect). Thus, we have a variant exposed here for its benefit. + pub fn maybe_update_chan_fees(&self) { + PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { + let mut should_persist = NotifyOption::SkipPersist; + + let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + + let mut handle_errors = Vec::new(); + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let pending_msg_events = &mut channel_state.pending_msg_events; + let short_to_id = &mut channel_state.short_to_id; + channel_state.by_id.retain(|chan_id, chan| { + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push(err); + } + retain_channel + }); + } + + should_persist + }); + } + + /// Performs actions which should happen on startup and roughly once per minute thereafter. /// - /// This method handles all the details, and must be called roughly once per minute. + /// This currently includes: + /// * Increasing or decreasing the on-chain feerate estimates for our outbound channels, + /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more + /// than a minute, informing the network that they should no longer attempt to route over + /// the channel. /// - /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call. + /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate + /// estimate fetches. 
pub fn timer_tick_occurred(&self) { PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = NotifyOption::SkipPersist; if self.process_background_events() { should_persist = NotifyOption::DoPersist; } - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - for (_, chan) in channel_state.by_id.iter_mut() { - match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), - ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Disabled); - }, - ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Enabled); - }, - _ => {}, - } + let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + + let mut handle_errors = Vec::new(); + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let pending_msg_events = &mut channel_state.pending_msg_events; + let short_to_id = &mut channel_state.short_to_id; + channel_state.by_id.retain(|chan_id, chan| { + let counterparty_node_id = chan.get_counterparty_node_id(); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push((err, counterparty_node_id)); + } + if !retain_channel { return false; } + + if let Err(e) = chan.timer_check_closing_negotiation_progress() { + let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + handle_errors.push((Err(err), chan.get_counterparty_node_id())); + if needs_close { return false; } + } + + match chan.channel_update_status() { + ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), + ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), + ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), + ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), + ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + 
should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Disabled); + }, + ChannelUpdateStatus::EnabledStaged if chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Enabled); + }, + _ => {}, + } + + true + }); } + for (err, counterparty_node_id) in handle_errors.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); + } should_persist }); } @@ -2642,22 +3003,27 @@ impl ChannelMana self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); }, - HTLCSource::OutboundRoute { session_priv, .. } => { - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { - self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { - payment_hash, - rejected_by_dest: false, -#[cfg(test)] - error_code: None, -#[cfg(test)] - error_data: None, - } - ) + HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + if payment.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) { + self.pending_events.lock().unwrap().push( + events::Event::PaymentPathFailed { + payment_hash, + rejected_by_dest: false, + network_update: None, + all_paths_failed: payment.get().remaining_parts() == 0, + path: path.clone(), + short_channel_id: None, + #[cfg(test)] + error_code: None, + #[cfg(test)] + error_data: None, + } + ); + } } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); } @@ -2682,12 +3048,20 @@ impl ChannelMana // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { - HTLCSource::OutboundRoute { ref path, session_priv, .. } => { - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { + HTLCSource::OutboundRoute { ref path, session_priv, payment_id, .. 
} => { + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + let mut all_paths_failed = false; + if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(payment_id) { + if !sessions.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) { + log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); + return; + } + if sessions.get().remaining_parts() == 0 { + all_paths_failed = true; + } + } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; } @@ -2696,23 +3070,20 @@ impl ChannelMana match &onion_error { &HTLCFailReason::LightningError { ref err } => { #[cfg(test)] - let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); #[cfg(not(test))] - let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); // TODO: If we decided to blame ourselves (or one of our channels) in // process_onion_failure we should close that channel as it implies our // next-hop is needlessly blaming us! - if let Some(update) = channel_update { - self.channel_state.lock().unwrap().pending_msg_events.push( - events::MessageSendEvent::PaymentFailureNetworkUpdate { - update, - } - ); - } self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { + events::Event::PaymentPathFailed { payment_hash: payment_hash.clone(), rejected_by_dest: !payment_retryable, + network_update, + all_paths_failed, + path: path.clone(), + short_channel_id, #[cfg(test)] error_code: onion_error_code, #[cfg(test)] @@ -2727,16 +3098,20 @@ impl ChannelMana ref data, .. } => { // we get a fail_malformed_htlc from the first hop - // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary + // TODO: We'd like to generate a NetworkUpdate for temporary // failures here, but that would be insufficient as get_route // generally ignores its view of our own channels as we provide them via // ChannelDetails. // TODO: For non-temporary failures, we really should be closing the // channel here as we apparently can't relay through them anyway. self.pending_events.lock().unwrap().push( - events::Event::PaymentFailed { + events::Event::PaymentPathFailed { payment_hash: payment_hash.clone(), rejected_by_dest: path.len() == 1, + network_update: None, + all_paths_failed, + path: path.clone(), + short_channel_id: Some(path.first().unwrap().short_channel_id), #[cfg(test)] error_code: Some(*failure_code), #[cfg(test)] @@ -2933,17 +3308,22 @@ impl ChannelMana fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool) { match source { - HTLCSource::OutboundRoute { session_priv, .. } => { + HTLCSource::OutboundRoute { session_priv, payment_id, path, .. 
} => { mem::drop(channel_state_lock); - if { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes) - } { - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PaymentSent { - payment_preimage - }); + let mut session_priv_bytes = [0; 32]; + session_priv_bytes.copy_from_slice(&session_priv[..]); + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + let found_payment = if let Some(mut sessions) = outbounds.remove(&payment_id) { + sessions.remove(&session_priv_bytes, path.last().unwrap().fee_msat) + } else { false }; + if found_payment { + let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + self.pending_events.lock().unwrap().push( + events::Event::PaymentSent { + payment_preimage, + payment_hash: payment_hash + } + ); } else { log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0)); } @@ -3068,7 +3448,7 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone())); } - let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), their_features, msg, 0, &self.default_configuration) + let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration) .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?; let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -3094,7 +3474,7 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id)); } - try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_features), channel_state, chan); + try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, &their_features), channel_state, chan); (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id)) @@ -3182,7 +3562,16 @@ impl ChannelMana Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), }; if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { - return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false); + let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false); + if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { + // We weren't able to watch the channel to begin with, so no updates should be made on + // it. Previously, full_stack_target found an (unreachable) panic when the + // monitor update contained within `shutdown_finish` was applied. 
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish { + shutdown_finish.0.take(); + } + } + return res } funding_tx }, @@ -3242,7 +3631,13 @@ impl ChannelMana return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (shutdown, closing_signed, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &self.keys_manager, &their_features, &msg), channel_state, chan_entry); + if !chan_entry.get().received_shutdown() { + log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.", + log_bytes!(msg.channel_id), + if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" }); + } + + let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); dropped_htlcs = htlcs; // Update the monitor with the shutdown script if necessary. @@ -3263,13 +3658,6 @@ impl ChannelMana msg, }); } - if let Some(msg) = closing_signed { - // TODO: Do not send this if the monitor update failed. - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: *counterparty_node_id, - msg, - }); - } break Ok(()); }, @@ -3326,6 +3714,7 @@ impl ChannelMana msg: update }); } + self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure); } Ok(()) } @@ -3350,33 +3739,34 @@ impl ChannelMana } let create_pending_htlc_status = |chan: &Channel, pending_forward_info: PendingHTLCStatus, error_code: u16| { - // Ensure error_code has the UPDATE flag set, since by default we send a - // channel update along as part of failing the HTLC. - assert!((error_code & 0x1000) != 0); // If the update_add is completely bogus, the call will Err and we will close, // but if we've sent a shutdown and they haven't acknowledged it yet, we just // want to reject the new HTLC and fail it backwards instead of forwarding. match pending_forward_info { PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => { - let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) { - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{ - let mut res = Vec::with_capacity(8 + 128); - // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791 - res.extend_from_slice(&byte_utils::be16_to_array(0)); - res.extend_from_slice(&upd.encode_with_len()[..]); - res - }[..]) + let reason = if (error_code & 0x1000) != 0 { + if let Ok(upd) = self.get_channel_update_for_unicast(chan) { + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{ + let mut res = Vec::with_capacity(8 + 128); + // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791 + res.extend_from_slice(&byte_utils::be16_to_array(0)); + res.extend_from_slice(&upd.encode_with_len()[..]); + res + }[..]) + } else { + // The only case where we'd be unable to + // successfully get a channel update is if the + // channel isn't in the fully-funded state yet, + // implying our counterparty is trying to route + // payments over the channel back to themselves + // (because no one else should know the short_id + // is a lightning channel yet). We should have + // no problem just calling this + // unknown_next_peer (0x4000|10). 
+ onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[]) + } } else { - // The only case where we'd be unable to - // successfully get a channel update is if the - // channel isn't in the fully-funded state yet, - // implying our counterparty is trying to route - // payments over the channel back to themselves - // (cause no one else should know the short_id - // is a lightning channel yet). We should have - // no problem just calling this - // unknown_next_peer (0x4000|10). - onion_utils::build_first_hop_failure_packet(incoming_shared_secret, 0x4000|10, &[]) + onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[]) }; let msg = msgs::UpdateFailHTLC { channel_id: msg.channel_id, @@ -3455,8 +3845,8 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let (revoke_and_ack, commitment_signed, closing_signed, monitor_update) = - match chan.get_mut().commitment_signed(&msg, &self.fee_estimator, &self.logger) { + let (revoke_and_ack, commitment_signed, monitor_update) = + match chan.get_mut().commitment_signed(&msg, &self.logger) { Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan), Err((Some(update), e)) => { assert!(chan.get().is_awaiting_monitor_update()); @@ -3468,7 +3858,6 @@ impl ChannelMana }; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); - //TODO: Rebroadcast closing_signed if present on monitor update restoration } channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), @@ -3487,12 +3876,6 @@ impl ChannelMana }, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } Ok(()) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3548,12 +3931,12 @@ impl ChannelMana break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let (commitment_update, pending_forwards, pending_failures, closing_signed, monitor_update, htlcs_to_fail_in) = - break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), channel_state, chan); + let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) = + break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); htlcs_to_fail = htlcs_to_fail_in; if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { if was_frozen_for_monitor { - assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); + assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); } else { if let Err(e) = handle_monitor_err!(self, e, 
channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) { @@ -3567,12 +3950,6 @@ impl ChannelMana updates, }); } - if let Some(msg) = closing_signed { - channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { - node_id: counterparty_node_id.clone(), - msg, - }); - } break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap())) }, hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3717,62 +4094,6 @@ impl ChannelMana Ok(()) } - /// Begin Update fee process. Allowed only on an outbound channel. - /// If successful, will generate a UpdateHTLCs event, so you should probably poll - /// PeerManager::process_events afterwards. - /// Note: This API is likely to change! - /// (C-not exported) Cause its doc(hidden) anyway - #[doc(hidden)] - pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let counterparty_node_id; - let err: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - - match channel_state.by_id.entry(channel_id) { - hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}), - hash_map::Entry::Occupied(mut chan) => { - if !chan.get().is_outbound() { - return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()}); - } - if chan.get().is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.get().is_live() { - return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()}); - } - counterparty_node_id = chan.get().get_counterparty_node_id(); - if let Some((update_fee, commitment_signed, monitor_update)) = - break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan) - { - if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - unimplemented!(); - } - log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, - }); - } - }, - } - return Ok(()) - }; - - match handle_error!(self, err, counterparty_node_id) { - Ok(_) => unreachable!(), - Err(e) => { Err(APIError::APIMisuseError { err: e.err })} - } - } - /// Process pending events from the `chain::Watch`, returning whether any events were processed. 
fn process_pending_monitor_events(&self) -> bool { let mut failed_channels = Vec::new(); @@ -3789,7 +4110,7 @@ impl ChannelMana self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } }, - MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => { + MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let by_id = &mut channel_state.by_id; @@ -3805,6 +4126,7 @@ impl ChannelMana msg: update }); } + self.issue_channel_close_events(&chan, ClosureReason::CommitmentTxConfirmed); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { @@ -3866,13 +4188,14 @@ impl ChannelMana Err(e) => { let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + // ChannelClosed event is generated by handle_error for us !close_channel } } }); } - let has_update = has_monitor_update || !failed_htlcs.is_empty(); + let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty(); for (failures, channel_id) in failed_htlcs.drain(..) { self.fail_holding_cell_htlcs(failures, channel_id); } @@ -3884,6 +4207,65 @@ impl ChannelMana has_update } + /// Check whether any channels have finished removing all pending updates after a shutdown + /// exchange and can now send a closing_signed. + /// Returns whether any closing_signed messages were generated. + fn maybe_generate_initial_closing_signed(&self) -> bool { + let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); + let mut has_update = false; + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let by_id = &mut channel_state.by_id; + let short_to_id = &mut channel_state.short_to_id; + let pending_msg_events = &mut channel_state.pending_msg_events; + + by_id.retain(|channel_id, chan| { + match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { + Ok((msg_opt, tx_opt)) => { + if let Some(msg) = msg_opt { + has_update = true; + pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { + node_id: chan.get_counterparty_node_id(), msg, + }); + } + if let Some(tx) = tx_opt { + // We're done with this channel. We got a closing_signed and sent back + // a closing_signed with a closing transaction to broadcast. + if let Some(short_id) = chan.get_short_channel_id() { + short_to_id.remove(&short_id); + } + + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + + self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); + + log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); + self.tx_broadcaster.broadcast_transaction(&tx); + false + } else { true } + }, + Err(e) => { + has_update = true; + let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id); + handle_errors.push((chan.get_counterparty_node_id(), Err(res))); + !close_channel + } + } + }); + } + + for (counterparty_node_id, err) in handle_errors.drain(..) 
{ + let _ = handle_error!(self, err, counterparty_node_id); + } + + has_update + } + /// Handle a list of channel failures during a block_connected or block_disconnected call, /// pushing the channel monitor update (if any) to the background events queue and removing the /// Channel object. @@ -4010,10 +4392,15 @@ impl ChannelMana #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))] pub fn get_and_clear_pending_events(&self) -> Vec { let events = core::cell::RefCell::new(Vec::new()); - let event_handler = |event| events.borrow_mut().push(event); + let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone()); self.process_pending_events(&event_handler); events.into_inner() } + + #[cfg(test)] + pub fn has_pending_payments(&self) -> bool { + !self.pending_outbound_payments.lock().unwrap().is_empty() + } } impl MessageSendEventsProvider for ChannelManager @@ -4037,6 +4424,9 @@ impl MessageSend if self.check_free_holding_cells() { result = NotifyOption::DoPersist; } + if self.maybe_generate_initial_closing_signed() { + result = NotifyOption::DoPersist; + } let mut pending_events = Vec::new(); let mut channel_state = self.channel_state.lock().unwrap(); @@ -4084,7 +4474,7 @@ where } for event in pending_events.drain(..) { - handler.handle_event(event); + handler.handle_event(&event); } result @@ -4186,6 +4576,16 @@ where payment_secrets.retain(|_, inbound_payment| { inbound_payment.expiry_time > header.time as u64 }); + + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + outbounds.retain(|_, payment| { + const PAYMENT_EXPIRY_BLOCKS: u32 = 3; + if payment.remaining_parts() != 0 { return true } + if let PendingOutboundPayment::Retryable { starting_block_height, .. } = payment { + return *starting_block_height + PAYMENT_EXPIRY_BLOCKS > height + } + true + }); } fn get_relevant_txids(&self) -> Vec { @@ -4279,6 +4679,7 @@ where msg: update }); } + self.issue_channel_close_events(channel, ClosureReason::CommitmentTxConfirmed); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { msg: e }, @@ -4469,6 +4870,7 @@ impl msg: update }); } + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); false } else { true @@ -4483,6 +4885,7 @@ impl if let Some(short_id) = chan.get_short_channel_id() { short_to_id.remove(&short_id); } + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); return false; } else { no_channels_remain = false; @@ -4509,7 +4912,6 @@ impl &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id, - &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true, &events::MessageSendEvent::SendChannelRangeQuery { .. } => false, &events::MessageSendEvent::SendShortIdsQuery { .. } => false, &events::MessageSendEvent::SendReplyChannelRange { .. 
} => false, @@ -4574,12 +4976,12 @@ impl for chan in self.list_channels() { if chan.counterparty.node_id == *counterparty_node_id { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id)); + let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id), Some(&msg.data)); } } } else { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel - let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id)); + let _ = self.force_close_channel_with_peer(&msg.channel_id, Some(counterparty_node_id), Some(&msg.data)); } } } @@ -4681,10 +5083,74 @@ impl_writeable_tlv_based!(PendingHTLCInfo, { (8, outgoing_cltv_value, required) }); -impl_writeable_tlv_based_enum!(HTLCFailureMsg, ; - (0, Relay), - (1, Malformed), -); + +impl Writeable for HTLCFailureMsg { + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + match self { + HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => { + 0u8.write(writer)?; + channel_id.write(writer)?; + htlc_id.write(writer)?; + reason.write(writer)?; + }, + HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { + channel_id, htlc_id, sha256_of_onion, failure_code + }) => { + 1u8.write(writer)?; + channel_id.write(writer)?; + htlc_id.write(writer)?; + sha256_of_onion.write(writer)?; + failure_code.write(writer)?; + }, + } + Ok(()) + } +} + +impl Readable for HTLCFailureMsg { + fn read(reader: &mut R) -> Result { + let id: u8 = Readable::read(reader)?; + match id { + 0 => { + Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + channel_id: Readable::read(reader)?, + htlc_id: Readable::read(reader)?, + reason: Readable::read(reader)?, + })) + }, + 1 => { + Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { + channel_id: Readable::read(reader)?, + htlc_id: Readable::read(reader)?, + sha256_of_onion: Readable::read(reader)?, + failure_code: Readable::read(reader)?, + })) + }, + // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but + // weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network + // messages contained in the variants. + // In version 0.0.101, support for reading the variants with these types was added, and + // we should migrate to writing these variants when UpdateFailHTLC or + // UpdateFailMalformedHTLC get TLV fields. 
+ 2 => { + let length: BigSize = Readable::read(reader)?; + let mut s = FixedLengthReader::new(reader, length.0); + let res = Readable::read(&mut s)?; + s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes + Ok(HTLCFailureMsg::Relay(res)) + }, + 3 => { + let length: BigSize = Readable::read(reader)?; + let mut s = FixedLengthReader::new(reader, length.0); + let res = Readable::read(&mut s)?; + s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes + Ok(HTLCFailureMsg::Malformed(res)) + }, + _ => Err(DecodeError::UnknownRequiredFeature), + } + } +} + impl_writeable_tlv_based_enum!(PendingHTLCStatus, ; (0, Forward), (1, Fail), @@ -4755,14 +5221,60 @@ impl Readable for ClaimableHTLC { } } -impl_writeable_tlv_based_enum!(HTLCSource, - (0, OutboundRoute) => { - (0, session_priv, required), - (2, first_hop_htlc_msat, required), - (4, path, vec_type), - }, ; - (1, PreviousHopData) -); +impl Readable for HTLCSource { + fn read(reader: &mut R) -> Result { + let id: u8 = Readable::read(reader)?; + match id { + 0 => { + let mut session_priv: ::util::ser::OptionDeserWrapper = ::util::ser::OptionDeserWrapper(None); + let mut first_hop_htlc_msat: u64 = 0; + let mut path = Some(Vec::new()); + let mut payment_id = None; + read_tlv_fields!(reader, { + (0, session_priv, required), + (1, payment_id, option), + (2, first_hop_htlc_msat, required), + (4, path, vec_type), + }); + if payment_id.is_none() { + // For backwards compat, if there was no payment_id written, use the session_priv bytes + // instead. + payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref())); + } + Ok(HTLCSource::OutboundRoute { + session_priv: session_priv.0.unwrap(), + first_hop_htlc_msat: first_hop_htlc_msat, + path: path.unwrap(), + payment_id: payment_id.unwrap(), + }) + } + 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), + _ => Err(DecodeError::UnknownRequiredFeature), + } + } +} + +impl Writeable for HTLCSource { + fn write(&self, writer: &mut W) -> Result<(), ::io::Error> { + match self { + HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => { + 0u8.write(writer)?; + let payment_id_opt = Some(payment_id); + write_tlv_fields!(writer, { + (0, session_priv, required), + (1, payment_id_opt, option), + (2, first_hop_htlc_msat, required), + (4, path, vec_type), + }); + } + HTLCSource::PreviousHopData(ref field) => { + 1u8.write(writer)?; + field.write(writer)?; + } + } + Ok(()) + } +} impl_writeable_tlv_based_enum!(HTLCFailReason, (0, LightningError) => { @@ -4795,6 +5307,20 @@ impl_writeable_tlv_based!(PendingInboundPayment, { (8, min_value_msat, required), }); +impl_writeable_tlv_based_enum!(PendingOutboundPayment, + (0, Legacy) => { + (0, session_privs, required), + }, + (2, Retryable) => { + (0, session_privs, required), + (2, payment_hash, required), + (4, payment_secret, option), + (6, total_msat, required), + (8, pending_amt_msat, required), + (10, starting_block_height, required), + }, +;); + impl Writeable for ChannelManager where M::Target: chain::Watch, T::Target: BroadcasterInterface, @@ -4883,12 +5409,37 @@ impl Writeable f } let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); - (pending_outbound_payments.len() as u64).write(writer)?; - for session_priv in pending_outbound_payments.iter() { - session_priv.write(writer)?; + // For backwards compat, write the session privs and their total length. 
+ let mut num_pending_outbounds_compat: u64 = 0; + for (_, outbound) in pending_outbound_payments.iter() { + num_pending_outbounds_compat += outbound.remaining_parts() as u64; + } + num_pending_outbounds_compat.write(writer)?; + for (_, outbound) in pending_outbound_payments.iter() { + match outbound { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + for session_priv in session_privs.iter() { + session_priv.write(writer)?; + } + } + } } - write_tlv_fields!(writer, {}); + // Encode without retry info for 0.0.101 compatibility. + let mut pending_outbound_payments_no_retry: HashMap> = HashMap::new(); + for (id, outbound) in pending_outbound_payments.iter() { + match outbound { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + pending_outbound_payments_no_retry.insert(*id, session_privs.clone()); + } + } + } + write_tlv_fields!(writer, { + (1, pending_outbound_payments_no_retry, required), + (3, pending_outbound_payments, required), + }); Ok(()) } @@ -5025,6 +5576,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128)); let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); + let mut channel_closures = Vec::new(); for _ in 0..channel_count { let mut channel: Channel = Channel::read(reader, &args.keys_manager)?; let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?; @@ -5048,9 +5600,17 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() { // But if the channel is behind of the monitor, close the channel: + log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); + log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); + log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", + log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id()); let (_, mut new_failed_htlcs) = channel.force_shutdown(true); failed_htlcs.append(&mut new_failed_htlcs); monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger); + channel_closures.push(events::Event::ChannelClosed { + channel_id: channel.channel_id(), + reason: ClosureReason::OutdatedChannelManager + }); } else { if let Some(short_channel_id) = channel.get_short_channel_id() { short_to_id.insert(short_channel_id, channel.channel_id()); @@ -5116,6 +5676,16 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> None => continue, } } + if forward_htlcs_count > 0 { + // If we have pending HTLCs to forward, assume we either dropped a + // `PendingHTLCsForwardable` or the user received it but never processed it as they + // shut down before the timer hit. Either way, set the time_forwardable to a small + // constant as enough time has likely passed that we should simply handle the forwards + // now, or at least after the user gets a chance to reconnect to our peers. 
+ pending_events_read.push(events::Event::PendingHTLCsForwardable { + time_forwardable: Duration::from_secs(2), + }); + } let background_event_count: u64 = Readable::read(reader)?; let mut pending_background_events_read: Vec = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::())); @@ -5137,19 +5707,43 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } - let pending_outbound_payments_count: u64 = Readable::read(reader)?; - let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32)); - for _ in 0..pending_outbound_payments_count { - if !pending_outbound_payments.insert(Readable::read(reader)?) { - return Err(DecodeError::InvalidValue); - } + let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; + let mut pending_outbound_payments_compat: HashMap = + HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); + for _ in 0..pending_outbound_payments_count_compat { + let session_priv = Readable::read(reader)?; + let payment = PendingOutboundPayment::Legacy { + session_privs: [session_priv].iter().cloned().collect() + }; + if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { + return Err(DecodeError::InvalidValue) + }; } - read_tlv_fields!(reader, {}); + // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. + let mut pending_outbound_payments_no_retry: Option>> = None; + let mut pending_outbound_payments = None; + read_tlv_fields!(reader, { + (1, pending_outbound_payments_no_retry, option), + (3, pending_outbound_payments, option), + }); + if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() { + pending_outbound_payments = Some(pending_outbound_payments_compat); + } else if pending_outbound_payments.is_none() { + let mut outbounds = HashMap::new(); + for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() { + outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs }); + } + pending_outbound_payments = Some(outbounds); + } let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes()); + if !channel_closures.is_empty() { + pending_events_read.append(&mut channel_closures); + } + let channel_manager = ChannelManager { genesis_hash, fee_estimator: args.fee_estimator, @@ -5166,7 +5760,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> pending_msg_events: Vec::new(), }), pending_inbound_payments: Mutex::new(pending_inbound_payments), - pending_outbound_payments: Mutex::new(pending_outbound_payments), + pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), our_network_key: args.keys_manager.get_node_secret(), our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()), @@ -5204,11 +5798,13 @@ mod tests { use bitcoin::hashes::sha256::Hash as Sha256; use core::time::Duration; use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; + use ln::channelmanager::{PaymentId, PaymentSendFailure}; use ln::features::{InitFeatures, InvoiceFeatures}; use ln::functional_test_utils::*; use ln::msgs; use ln::msgs::ChannelMessageHandler; use routing::router::{get_keysend_route, get_route}; + use util::errors::APIError; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; use util::test_utils; @@ -5351,12 +5947,13 @@ mod tests 
{ // First, send a partial MPP payment. let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; - let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]); + let payment_id = PaymentId([42; 32]); // Use the utility function send_payment_along_path to send the payment with MPP data which // indicates there are more HTLCs coming. let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match. - nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap(); + nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5386,7 +5983,7 @@ mod tests { expect_payment_failed!(nodes[0], our_payment_hash, true); // Send the second half of the original MPP payment. - nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap(); + nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5424,17 +6021,13 @@ mod tests { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa); check_added_monitors!(nodes[0], 1); - // There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here. + // Note that successful MPP payments will generate 1 event upon the first path's success. No + // further events will be generated for subsequence path successes. let events = nodes[0].node.get_and_clear_pending_events(); match events[0] { - Event::PaymentSent { payment_preimage: ref preimage } => { - assert_eq!(payment_preimage, *preimage); - }, - _ => panic!("Unexpected event"), - } - match events[1] { - Event::PaymentSent { payment_preimage: ref preimage } => { + Event::PaymentSent { payment_preimage: ref preimage, payment_hash: ref hash } => { assert_eq!(payment_preimage, *preimage); + assert_eq!(our_payment_hash, *hash); }, _ => panic!("Unexpected event"), } @@ -5458,7 +6051,7 @@ mod tests { let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000); // Next, attempt a keysend payment and make sure it fails. 
- let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -5486,8 +6079,8 @@ mod tests { // To start (2), send a keysend payment but don't claim it. let payment_preimage = PaymentPreimage([42; 32]); - let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); - let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); + let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap(); + let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -5538,15 +6131,15 @@ mod tests { nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() }); let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); - let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap(); + let network_graph = &nodes[0].net_graph_msg_handler.network_graph; let first_hops = nodes[0].node.list_usable_channels(); - let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey, + let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey, Some(&first_hops.iter().collect::>()), &vec![], 10000, 40, nodes[0].logger).unwrap(); let test_preimage = PaymentPreimage([42; 32]); let mismatch_payment_hash = PaymentHash([43; 32]); - let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap(); + let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -5574,16 +6167,16 @@ mod tests { nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() }); let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); - let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap(); + let network_graph = &nodes[0].net_graph_msg_handler.network_graph; let first_hops = nodes[0].node.list_usable_channels(); - let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey, + let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey, Some(&first_hops.iter().collect::>()), &vec![], 10000, 40, 
nodes[0].logger).unwrap(); let test_preimage = PaymentPreimage([42; 32]); let test_secret = PaymentSecret([43; 32]); let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner()); - let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap(); + let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap(); check_added_monitors!(nodes[0], 1); let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -5596,6 +6189,39 @@ mod tests { nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1); } + + #[test] + fn test_multi_hop_missing_secret() { + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let logger = test_utils::TestLogger::new(); + + // Marshall an MPP route. + let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]); + let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; + let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap(); + let path = route.paths[0].clone(); + route.paths.push(path); + route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0][0].short_channel_id = chan_1_id; + route.paths[0][1].short_channel_id = chan_3_id; + route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1][0].short_channel_id = chan_2_id; + route.paths[1][1].short_channel_id = chan_4_id; + + match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() { + PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { + assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) }, + _ => panic!("unexpected error") + } + } } #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
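
The new type-2/type-3 arms of the HTLCFailureMsg reader in this patch prefix each variant with a BigSize length and read it through a FixedLengthReader, so a TLV suffix added to UpdateFailHTLC or UpdateFailMalformedHTLC later can be consumed or skipped without desyncing the stream. Below is a minimal sketch of that framing idea using only std::io; the two-byte length prefix and the helper name are illustrative assumptions, not LDK's actual wire format.

use std::io::{self, Cursor, Read};

// Hypothetical helper: read a 2-byte big-endian length, then exactly that many
// payload bytes, surfacing an error (rather than silently desyncing) on truncation.
fn read_length_prefixed(r: &mut impl Read) -> io::Result<Vec<u8>> {
    let mut len_bytes = [0u8; 2];
    r.read_exact(&mut len_bytes)?;
    let len = u16::from_be_bytes(len_bytes) as u64;

    let mut bounded = r.take(len); // cap reads at the declared length
    let mut payload = Vec::new();
    bounded.read_to_end(&mut payload)?;
    if (payload.len() as u64) != len {
        // Analogous to FixedLengthReader::eat_remaining() reporting a short read.
        return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "short read"));
    }
    Ok(payload)
}

fn main() -> io::Result<()> {
    let mut buf = Cursor::new(vec![0x00, 0x03, 0xaa, 0xbb, 0xcc, 0xff]);
    assert_eq!(read_length_prefixed(&mut buf)?, vec![0xaa, 0xbb, 0xcc]);
    // 0xff remains unread, like bytes that follow a length-prefixed variant.
    Ok(())
}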
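
The HTLCSource reader in the patch keeps pre-PaymentId serializations working by falling back to the session_priv bytes when no payment_id TLV (type 1) was written. Sketched in isolation with simplified stand-in types; the helper function below is hypothetical, only the PaymentId newtype shape comes from the diff.

#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
struct PaymentId([u8; 32]); // stand-in for ln::channelmanager::PaymentId

// If an old serialization carried no payment_id, synthesize one from session_priv
// so the PaymentId-keyed outbound tracking still works after an upgrade.
fn payment_id_or_legacy(read_payment_id: Option<PaymentId>, session_priv: &[u8; 32]) -> PaymentId {
    read_payment_id.unwrap_or(PaymentId(*session_priv))
}

fn main() {
    let session_priv = [7u8; 32];
    assert_eq!(payment_id_or_legacy(None, &session_priv), PaymentId([7u8; 32]));
    assert_eq!(
        payment_id_or_legacy(Some(PaymentId([1u8; 32])), &session_priv),
        PaymentId([1u8; 32])
    );
}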
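
Similarly, the ChannelManager serialization in this patch writes pending outbound payments three ways: the pre-TLV count-prefixed session_priv list, a no-retry map at TLV type 1 for 0.0.101 readers, and the full PendingOutboundPayment map at TLV type 3; on read, the richest data available wins. A rough sketch of that preference order with simplified stand-in types (resolve_outbounds and the Outbound enum are illustrative, not LDK's API).

use std::collections::{HashMap, HashSet};

type PaymentId = [u8; 32]; // stand-in for the PaymentId newtype

#[derive(Debug, PartialEq)]
enum Outbound {
    Legacy { session_privs: HashSet<[u8; 32]> },
    Retryable { session_privs: HashSet<[u8; 32]>, total_msat: u64 },
}

fn resolve_outbounds(
    compat: HashMap<PaymentId, Outbound>,                    // from the count-prefixed list
    no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>>, // TLV type 1 (0.0.101 writers)
    full: Option<HashMap<PaymentId, Outbound>>,              // TLV type 3 (0.0.102+ writers)
) -> HashMap<PaymentId, Outbound> {
    if let Some(full) = full {
        // Newest writers: keep the retry metadata.
        full
    } else if let Some(no_retry) = no_retry {
        // 0.0.101-style TLV: session_privs only, wrap as Legacy.
        no_retry
            .into_iter()
            .map(|(id, session_privs)| (id, Outbound::Legacy { session_privs }))
            .collect()
    } else {
        // No TLVs at all: use the pre-TLV map as-is.
        compat
    }
}

fn main() {
    // Legacy-only data (no TLVs) is used unchanged.
    let mut compat = HashMap::new();
    compat.insert([1; 32], Outbound::Legacy { session_privs: [[9; 32]].iter().cloned().collect() });
    assert_eq!(resolve_outbounds(compat, None, None).len(), 1);

    // When the type-3 map is present it is preferred, retry data and all.
    let mut full = HashMap::new();
    full.insert([2; 32], Outbound::Retryable { session_privs: HashSet::new(), total_msat: 100_000 });
    let resolved = resolve_outbounds(HashMap::new(), None, Some(full));
    assert!(matches!(resolved.get(&[2; 32]), Some(Outbound::Retryable { total_msat: 100_000, .. })));
}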