X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=019d61c57c09a2d3e8f0606a3353a49198cc5425;hb=f624cc9ac231d7944548ce392b0d419ee66c089a;hp=a4684bfe6cc2960438bfdc9e2e575ed2b7aa2f79;hpb=0273ac52db6df30b798b1815b093d9ef065c8c17;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a4684bfe..019d61c5 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -36,9 +36,9 @@ use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1; use chain; -use chain::{Confirm, Watch, BestBlock}; +use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock}; use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; -use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; +use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; use chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. @@ -172,20 +172,20 @@ struct ClaimableHTLC { onion_payload: OnionPayload, } -/// A payment identifier used to correlate an MPP payment's per-path HTLC sources internally. +/// A payment identifier used to uniquely identify a payment to LDK. #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] -pub(crate) struct MppId(pub [u8; 32]); +pub struct PaymentId(pub [u8; 32]); -impl Writeable for MppId { +impl Writeable for PaymentId { fn write(&self, w: &mut W) -> Result<(), io::Error> { self.0.write(w) } } -impl Readable for MppId { +impl Readable for PaymentId { fn read(r: &mut R) -> Result { let buf: [u8; 32] = Readable::read(r)?; - Ok(MppId(buf)) + Ok(PaymentId(buf)) } } /// Tracks the inbound corresponding to an outbound HTLC @@ -198,7 +198,7 @@ pub(crate) enum HTLCSource { /// Technically we can recalculate this from the route, but we cache it here to avoid /// doing a double-pass on route when we get a failure back first_hop_htlc_msat: u64, - mpp_id: MppId, + payment_id: PaymentId, }, } #[cfg(test)] @@ -208,7 +208,7 @@ impl HTLCSource { path: Vec::new(), session_priv: SecretKey::from_slice(&[1; 32]).unwrap(), first_hop_htlc_msat: 0, - mpp_id: MppId([2; 32]), + payment_id: PaymentId([2; 32]), } } } @@ -242,7 +242,7 @@ type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource struct MsgHandleErrInternal { err: msgs::LightningError, - chan_id: Option<[u8; 32]>, // If Some a channel of ours has been closed + chan_id: Option<([u8; 32], u64)>, // If Some a channel of ours has been closed shutdown_finish: Option<(ShutdownResult, Option)>, } impl MsgHandleErrInternal { @@ -278,7 +278,7 @@ impl MsgHandleErrInternal { Self { err, chan_id: None, shutdown_finish: None } } #[inline] - fn from_finish_shutdown(err: String, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option) -> Self { + fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u64, shutdown_res: ShutdownResult, channel_update: Option) -> Self { Self { err: LightningError { err: err.clone(), @@ -289,7 +289,7 @@ impl MsgHandleErrInternal { }, }, }, - chan_id: 
Some(channel_id), + chan_id: Some((channel_id, user_channel_id)), shutdown_finish: Some((shutdown_res, channel_update)), } } @@ -400,6 +400,65 @@ struct PendingInboundPayment { min_value_msat: Option, } +/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102 +/// and later, also stores information for retrying the payment. +pub(crate) enum PendingOutboundPayment { + Legacy { + session_privs: HashSet<[u8; 32]>, + }, + Retryable { + session_privs: HashSet<[u8; 32]>, + payment_hash: PaymentHash, + payment_secret: Option, + pending_amt_msat: u64, + /// The total payment amount across all paths, used to verify that a retry is not overpaying. + total_msat: u64, + /// Our best known block height at the time this payment was initiated. + starting_block_height: u32, + }, +} + +impl PendingOutboundPayment { + fn remove(&mut self, session_priv: &[u8; 32], part_amt_msat: u64) -> bool { + let remove_res = match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + session_privs.remove(session_priv) + } + }; + if remove_res { + if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self { + *pending_amt_msat -= part_amt_msat; + } + } + remove_res + } + + fn insert(&mut self, session_priv: [u8; 32], part_amt_msat: u64) -> bool { + let insert_res = match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + session_privs.insert(session_priv) + } + }; + if insert_res { + if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, .. } = self { + *pending_amt_msat += part_amt_msat; + } + } + insert_res + } + + fn remaining_parts(&self) -> usize { + match self { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + session_privs.len() + } + } + } +} + /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g. /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static /// lifetimes). Other times you can afford a reference, which is more efficient, in which case @@ -486,7 +545,7 @@ pub struct ChannelManager>, - /// The session_priv bytes of outbound payments which are pending resolution. + /// The session_priv bytes and retry metadata of outbound payments which are pending resolution. /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors /// (if the channel has been force-closed), however we track them here to prevent duplicative /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative @@ -495,11 +554,10 @@ pub struct ChannelManager>>, + pending_outbound_payments: Mutex>, our_network_key: SecretKey, our_network_pubkey: PublicKey, @@ -718,8 +776,8 @@ pub struct ChannelDetails { /// /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat pub unspendable_punishment_reserve: Option, - /// The user_id passed in to create_channel, or 0 if the channel was inbound. - pub user_id: u64, + /// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound. + pub user_channel_id: u64, /// The available outbound capacity for sending HTLCs to the remote peer. This does not include /// any pending HTLCs which are not yet fully resolved (and, thus, who's balance is not /// available for inclusion in new outbound HTLCs). 
This further does not include any pending @@ -836,8 +894,11 @@ macro_rules! handle_error { msg: update }); } - if let Some(channel_id) = chan_id { - $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id, reason: ClosureReason::ProcessingError { err: err.err.clone() } }); + if let Some((channel_id, user_channel_id)) = chan_id { + $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { + channel_id, user_channel_id, + reason: ClosureReason::ProcessingError { err: err.err.clone() } + }); } } @@ -879,7 +940,8 @@ macro_rules! convert_chan_err { $short_to_id.remove(&short_id); } let shutdown_res = $channel.force_shutdown(true); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), + shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, ChannelError::CloseDelayBroadcast(msg) => { log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg); @@ -887,7 +949,8 @@ macro_rules! convert_chan_err { $short_to_id.remove(&short_id); } let shutdown_res = $channel.force_shutdown(false); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(), + shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) } } } @@ -939,7 +1002,7 @@ macro_rules! handle_monitor_err { ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new()) }; - ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => { + ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { match $err { ChannelMonitorUpdateErr::PermanentFailure => { log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..])); @@ -955,12 +1018,12 @@ macro_rules! handle_monitor_err { // splitting hairs we'd prefer to claim payments that were to us, but we haven't // given up the preimage yet, so might as well just wait until the payment is // retried, avoiding the on-chain fees. - let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, + let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(), $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() )); (res, true) }, ChannelMonitorUpdateErr::TemporaryFailure => { - log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails", + log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. 
On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations", log_bytes!($chan_id[..]), if $resend_commitment && $resend_raa { match $action_type { @@ -971,25 +1034,29 @@ macro_rules! handle_monitor_err { else if $resend_raa { "RAA" } else { "nothing" }, (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(), - (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len()); + (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(), + (&$failed_finalized_fulfills as &Vec).len()); if !$resend_commitment { debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa); } if !$resend_raa { debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment); } - $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails); + $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills); (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false) }, } }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { { - let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key()); + ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { { + let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); if drop { $entry.remove_entry(); } res } }; + ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { + handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new()); + } } macro_rules! return_monitor_err { @@ -1209,22 +1276,31 @@ impl ChannelMana /// Creates a new outbound channel to the given remote node and with the given value. /// - /// user_id will be provided back as user_channel_id in FundingGenerationReady events to allow - /// tracking of which events correspond with which create_channel call. Note that the - /// user_channel_id defaults to 0 for inbound channels, so you may wish to avoid using 0 for - /// user_id here. user_id has no meaning inside of LDK, it is simply copied to events and - /// otherwise ignored. - /// - /// If successful, will generate a SendOpenChannel message event, so you should probably poll - /// PeerManager::process_events afterwards. + /// `user_channel_id` will be provided back as in + /// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events + /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to 0 + /// for inbound channels, so you may wish to avoid using 0 for `user_channel_id` here. + /// `user_channel_id` has no meaning inside of LDK, it is simply copied to events and otherwise + /// ignored. 
/// - /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is - /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000. + /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is + /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`. /// /// Note that we do not check if you are currently connected to the given peer. If no /// connection is available, the outbound `open_channel` message may fail to send, resulting in - /// the channel eventually being silently forgotten. - pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option) -> Result<(), APIError> { + /// the channel eventually being silently forgotten (dropped on reload). + /// + /// Returns the new Channel's temporary `channel_id`. This ID will appear as + /// [`Event::FundingGenerationReady::temporary_channel_id`] and in + /// [`ChannelDetails::channel_id`] until after + /// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for + /// one derived from the funding transaction's TXID. If the counterparty rejects the channel + /// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`]. + /// + /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id + /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id + /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id + pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u64, override_config: Option) -> Result<[u8; 32], APIError> { if channel_value_satoshis < 1000 { return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) }); } @@ -1236,7 +1312,7 @@ impl ChannelMana let peer_state = peer_state.lock().unwrap(); let their_features = &peer_state.latest_features; let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration }; - Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_id, config)? + Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config)? }, None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }), } @@ -1247,8 +1323,9 @@ impl ChannelMana // We want to make sure the lock is actually acquired by PersistenceNotifierGuard. 
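// As a usage illustration of the new `Result<[u8; 32], APIError>` return type documented
// above: a caller can now record the temporary channel id at `create_channel` time and key
// its own state by `user_channel_id`. A minimal, std-only sketch; the caller-side types are
// hypothetical stand-ins, not LDK API.
use std::collections::HashMap;

struct PendingChannel { temporary_channel_id: [u8; 32], label: String }

fn main() {
    let mut by_user_id: HashMap<u64, PendingChannel> = HashMap::new();
    // Suppose create_channel(.., user_channel_id = 42, ..) just returned this temporary id:
    let temp_id = [7u8; 32];
    by_user_id.insert(42, PendingChannel { temporary_channel_id: temp_id, label: "first channel".to_string() });
    // A later FundingGenerationReady/ChannelClosed event carrying user_channel_id = 42 maps
    // straight back to the caller's record:
    let record = by_user_id.get(&42).expect("tracked since create_channel");
    assert_eq!(record.temporary_channel_id, temp_id);
}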
debug_assert!(&self.total_consistency_lock.try_write().is_err()); + let temporary_channel_id = channel.channel_id(); let mut channel_state = self.channel_state.lock().unwrap(); - match channel_state.by_id.entry(channel.channel_id()) { + match channel_state.by_id.entry(temporary_channel_id) { hash_map::Entry::Occupied(_) => { if cfg!(feature = "fuzztarget") { return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() }); @@ -1262,7 +1339,7 @@ impl ChannelMana node_id: their_network_key, msg: res, }); - Ok(()) + Ok(temporary_channel_id) } fn list_channels_with_filter)) -> bool>(&self, f: Fn) -> Vec { @@ -1288,7 +1365,7 @@ impl ChannelMana unspendable_punishment_reserve: to_self_reserve_satoshis, inbound_capacity_msat, outbound_capacity_msat, - user_id: channel.get_user_id(), + user_channel_id: channel.get_user_id(), confirmations_required: channel.minimum_depth(), force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), is_outbound: channel.is_outbound(), @@ -1326,6 +1403,22 @@ impl ChannelMana self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) } + /// Helper function that issues the channel close events + fn issue_channel_close_events(&self, channel: &Channel, closure_reason: ClosureReason) { + let mut pending_events_lock = self.pending_events.lock().unwrap(); + match channel.unbroadcasted_funding() { + Some(transaction) => { + pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction }) + }, + None => {}, + } + pending_events_lock.push(events::Event::ChannelClosed { + channel_id: channel.channel_id(), + user_channel_id: channel.get_user_id(), + reason: closure_reason + }); + } + fn close_channel_internal(&self, channel_id: &[u8; 32], target_feerate_sats_per_1000_weight: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -1352,7 +1445,7 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key()); + handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key()); if is_permanent { remove_channel!(channel_state, chan_entry); break result; @@ -1372,12 +1465,7 @@ impl ChannelMana msg: channel_update }); } - if let Ok(mut pending_events_lock) = self.pending_events.lock() { - pending_events_lock.push(events::Event::ChannelClosed { - channel_id: *channel_id, - reason: ClosureReason::HolderForceClosed - }); - } + self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed); } break Ok(()); }, @@ -1468,13 +1556,12 @@ impl ChannelMana if let Some(short_id) = chan.get().get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } - let mut pending_events_lock = self.pending_events.lock().unwrap(); if peer_node_id.is_some() { if let Some(peer_msg) = peer_msg { - pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() } }); + self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() 
}); } } else { - pending_events_lock.push(events::Event::ChannelClosed { channel_id: *channel_id, reason: ClosureReason::HolderForceClosed }); + self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed); } chan.remove_entry().1 } else { @@ -1878,7 +1965,7 @@ impl ChannelMana } // Only public for testing, this should otherwise never be called directly - pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, mpp_id: MppId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> { + pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let prng_seed = self.keys_manager.get_secure_random_bytes(); let session_priv_bytes = self.keys_manager.get_secure_random_bytes(); @@ -1893,9 +1980,6 @@ impl ChannelMana let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); - let sessions = pending_outbounds.entry(mpp_id).or_insert(HashSet::new()); - assert!(sessions.insert(session_priv_bytes)); let err: Result<(), _> = loop { let mut channel_lock = self.channel_state.lock().unwrap(); @@ -1913,12 +1997,27 @@ impl ChannelMana if !chan.get().is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()}); } - break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - path: path.clone(), - session_priv: session_priv.clone(), - first_hop_htlc_msat: htlc_msat, - mpp_id, - }, onion_packet, &self.logger), channel_state, chan) + let send_res = break_chan_entry!(self, chan.get_mut().send_htlc_and_commit( + htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { + path: path.clone(), + session_priv: session_priv.clone(), + first_hop_htlc_msat: htlc_msat, + payment_id, + }, onion_packet, &self.logger), + channel_state, chan); + + let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); + let payment = pending_outbounds.entry(payment_id).or_insert_with(|| PendingOutboundPayment::Retryable { + session_privs: HashSet::new(), + pending_amt_msat: 0, + payment_hash: *payment_hash, + payment_secret: *payment_secret, + starting_block_height: self.best_block.read().unwrap().height(), + total_msat: total_value, + }); + assert!(payment.insert(session_priv_bytes, path.last().unwrap().fee_msat)); + + send_res } { Some((update_add, commitment_signed, monitor_update)) => { if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { @@ -1997,11 +2096,11 @@ impl ChannelMana /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature /// bit set (either as required or as available). If multiple paths are present in the Route, /// we assume the invoice had the basic_mpp feature set.
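// The bookkeeping just added above, reduced to a self-contained sketch: each in-flight part
// of a payment is keyed by its session_priv bytes, and `pending_amt_msat` tracks the sum of
// outstanding parts. A minimal, std-only model of PendingOutboundPayment::Retryable, not the
// real type.
use std::collections::HashSet;

struct RetryablePayment {
    session_privs: HashSet<[u8; 32]>,
    pending_amt_msat: u64,
    total_msat: u64,
}

impl RetryablePayment {
    fn insert(&mut self, session_priv: [u8; 32], part_amt_msat: u64) -> bool {
        let inserted = self.session_privs.insert(session_priv);
        if inserted { self.pending_amt_msat += part_amt_msat; }
        inserted
    }
    fn remove(&mut self, session_priv: &[u8; 32], part_amt_msat: u64) -> bool {
        let removed = self.session_privs.remove(session_priv);
        if removed { self.pending_amt_msat -= part_amt_msat; }
        removed
    }
    fn remaining_parts(&self) -> usize { self.session_privs.len() }
}

fn main() {
    let mut payment = RetryablePayment { session_privs: HashSet::new(), pending_amt_msat: 0, total_msat: 200_000 };
    assert!(payment.insert([1; 32], 100_000)); // first MPP part
    assert!(payment.insert([2; 32], 100_000)); // second MPP part
    assert!(payment.pending_amt_msat <= payment.total_msat);
    // One part fails back; `all_paths_failed` only once nothing remains in flight:
    assert!(payment.remove(&[1; 32], 100_000));
    assert_eq!(payment.remaining_parts(), 1);
    assert_eq!(payment.pending_amt_msat, 100_000);
}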
- pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result<(), PaymentSendFailure> { - self.send_payment_internal(route, payment_hash, payment_secret, None) + pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result { + self.send_payment_internal(route, payment_hash, payment_secret, None, None, None) } - fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option) -> Result<(), PaymentSendFailure> { + fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option, keysend_preimage: Option, payment_id: Option, recv_value_msat: Option) -> Result { if route.paths.len() < 1 { return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); } @@ -2017,7 +2116,7 @@ impl ChannelMana let mut total_value = 0; let our_node_id = self.get_our_node_id(); let mut path_errs = Vec::with_capacity(route.paths.len()); - let mpp_id = MppId(self.keys_manager.get_secure_random_bytes()); + let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) }; 'path_check: for path in route.paths.iter() { if path.len() < 1 || path.len() > 20 { path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"})); @@ -2035,11 +2134,15 @@ impl ChannelMana if path_errs.iter().any(|e| e.is_err()) { return Err(PaymentSendFailure::PathParameterError(path_errs)); } + if let Some(amt_msat) = recv_value_msat { + debug_assert!(amt_msat >= total_value); + total_value = amt_msat; + } let cur_height = self.best_block.read().unwrap().height() + 1; let mut results = Vec::new(); for path in route.paths.iter() { - results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, mpp_id, &keysend_preimage)); + results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage)); } let mut has_ok = false; let mut has_err = false; @@ -2059,8 +2162,56 @@ impl ChannelMana } else if has_err { Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect())) } else { - Ok(()) + Ok(payment_id) + } + } + + /// Retries a payment along the given [`Route`]. + /// + /// Errors returned are a superset of those returned from [`send_payment`], so see + /// [`send_payment`] documentation for more details on errors. This method will also error if the + /// retry amount puts the payment more than 10% over the payment's total amount, or if the payment + /// for the given `payment_id` cannot be found (likely due to timeout or success). + /// + /// [`send_payment`]: [`ChannelManager::send_payment`] + pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> { + const RETRY_OVERFLOW_PERCENTAGE: u64 = 10; + for path in route.paths.iter() { + if path.len() == 0 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "length-0 path in route".to_string() + })) + } } + + let (total_msat, payment_hash, payment_secret) = { + let outbounds = self.pending_outbound_payments.lock().unwrap(); + if let Some(payment) = outbounds.get(&payment_id) { + match payment { + PendingOutboundPayment::Retryable { + total_msat, payment_hash, payment_secret, pending_amt_msat, .. 
+ } => { + let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum(); + if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string() + })) + } + (*total_msat, *payment_hash, *payment_secret) + }, + PendingOutboundPayment::Legacy { .. } => { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string() + })) + } + } + } else { + return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError { + err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)), + })) + } + }; + return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ()) } /// Send a spontaneous payment, which is a payment that does not require the recipient to have @@ -2077,14 +2228,14 @@ impl ChannelMana /// Note that `route` must have exactly one path. /// /// [`send_payment`]: Self::send_payment - pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option) -> Result { + pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> { let preimage = match payment_preimage { Some(p) => p, None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()), }; let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner()); - match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) { - Ok(()) => Ok(payment_hash), + match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) { + Ok(payment_id) => Ok((payment_hash, payment_id)), Err(e) => Err(e) } } @@ -2100,7 +2251,7 @@ impl ChannelMana (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger) .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None) + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None) } else { unreachable!(); }) , chan) }, @@ -2442,7 +2593,7 @@ impl ChannelMana channel_state.short_to_id.remove(&short_id); } // ChannelClosed event is generated by handle_error for us. 
- Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) + Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) }, ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } }; @@ -2699,7 +2850,7 @@ impl ChannelMana let ret_err = match res { Ok(Some((update_fee, commitment_signed, monitor_update))) => { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id); + let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), chan_id); if drop { retain_channel = false; } res } else { @@ -2875,28 +3026,26 @@ impl ChannelMana self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); }, - HTLCSource::OutboundRoute { session_priv, mpp_id, path, .. } => { + HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) { - if sessions.get_mut().remove(&session_priv_bytes) { + if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { + if payment.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) { self.pending_events.lock().unwrap().push( events::Event::PaymentPathFailed { payment_hash, rejected_by_dest: false, network_update: None, - all_paths_failed: sessions.get().len() == 0, + all_paths_failed: payment.get().remaining_parts() == 0, path: path.clone(), + short_channel_id: None, #[cfg(test)] error_code: None, #[cfg(test)] error_data: None, } ); - if sessions.get().len() == 0 { - sessions.remove(); - } } } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); @@ -2922,19 +3071,18 @@ impl ChannelMana // from block_connected which may run during initialization prior to the chain_monitor // being fully configured. See the docs for `ChannelManagerReadArgs` for more. match source { - HTLCSource::OutboundRoute { ref path, session_priv, mpp_id, .. } => { + HTLCSource::OutboundRoute { ref path, session_priv, payment_id, .. 
} => { let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); let mut all_paths_failed = false; - if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(mpp_id) { - if !sessions.get_mut().remove(&session_priv_bytes) { + if let hash_map::Entry::Occupied(mut sessions) = outbounds.entry(payment_id) { + if !sessions.get_mut().remove(&session_priv_bytes, path.last().unwrap().fee_msat) { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; } - if sessions.get().len() == 0 { + if sessions.get().remaining_parts() == 0 { all_paths_failed = true; - sessions.remove(); } } else { log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); @@ -2945,9 +3093,9 @@ impl ChannelMana match &onion_error { &HTLCFailReason::LightningError { ref err } => { #[cfg(test)] - let (network_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); #[cfg(not(test))] - let (network_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); // TODO: If we decided to blame ourselves (or one of our channels) in // process_onion_failure we should close that channel as it implies our // next-hop is needlessly blaming us! @@ -2958,6 +3106,7 @@ impl ChannelMana network_update, all_paths_failed, path: path.clone(), + short_channel_id, #[cfg(test)] error_code: onion_error_code, #[cfg(test)] @@ -2985,6 +3134,7 @@ impl ChannelMana network_update: None, all_paths_failed, path: path.clone(), + short_channel_id: Some(path.first().unwrap().short_channel_id), #[cfg(test)] error_code: Some(*failure_code), #[cfg(test)] @@ -3181,17 +3331,21 @@ impl ChannelMana fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option, from_onchain: bool) { match source { - HTLCSource::OutboundRoute { session_priv, mpp_id, .. } => { + HTLCSource::OutboundRoute { session_priv, payment_id, path, .. 
} => { mem::drop(channel_state_lock); let mut session_priv_bytes = [0; 32]; session_priv_bytes.copy_from_slice(&session_priv[..]); let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - let found_payment = if let Some(mut sessions) = outbounds.remove(&mpp_id) { - sessions.remove(&session_priv_bytes) + let found_payment = if let Some(mut sessions) = outbounds.remove(&payment_id) { + sessions.remove(&session_priv_bytes, path.last().unwrap().fee_msat) } else { false }; if found_payment { + let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); self.pending_events.lock().unwrap().push( - events::Event::PaymentSent { payment_preimage } + events::Event::PaymentSent { + payment_preimage, + payment_hash: payment_hash + } ); } else { log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0)); @@ -3254,27 +3408,7 @@ impl ChannelMana self.our_network_pubkey.clone() } - /// Restores a single, given channel to normal operation after a - /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update - /// operation. - /// - /// All ChannelMonitor updates up to and including highest_applied_update_id must have been - /// fully committed in every copy of the given channels' ChannelMonitors. - /// - /// Note that there is no effect to calling with a highest_applied_update_id other than the - /// current latest ChannelMonitorUpdate and one call to this function after multiple - /// ChannelMonitorUpdateErr::TemporaryFailures is fine. The highest_applied_update_id field - /// exists largely only to prevent races between this and concurrent update_monitor calls. - /// - /// Thus, the anticipated use is, at a high level: - /// 1) You register a chain::Watch with this ChannelManager, - /// 2) it stores each update to disk, and begins updating any remote (eg watchtower) copies of - /// said ChannelMonitors as it can, returning ChannelMonitorUpdateErr::TemporaryFailures - /// any time it cannot do so instantly, - /// 3) update(s) are applied to each remote copy of a ChannelMonitor, - /// 4) once all remote copies are updated, you call this function with the update_id that - /// completed, and once it is the latest the Channel will be re-enabled. - pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) { + fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let chan_restoration_res; @@ -3289,8 +3423,8 @@ impl ChannelMana return; } - let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger); - let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() { + let updates = channel.get_mut().monitor_updating_restored(&self.logger); + let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() { // We only send a channel_update in the case where we are just now sending a // funding_locked and the channel is in a usable state. 
Further, we rely on the // normal announcement_signatures process to send a channel_update for public @@ -3300,11 +3434,12 @@ impl ChannelMana msg: self.get_channel_update_for_unicast(channel.get()).unwrap(), }) } else { None }; - chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked); + // TODO: Handle updates.finalized_claimed_htlcs! + chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked); if let Some(upd) = channel_update { channel_state.pending_msg_events.push(upd); } - pending_failures + updates.failed_htlcs }; post_handle_chan_restoration!(self, chan_restoration_res); for failure in pending_failures.drain(..) { @@ -3395,7 +3530,7 @@ impl ChannelMana // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't // accepted payment from yet. We do, however, need to wait to send our funding_locked // until we have persisted our monitor. - chan.monitor_update_failed(false, false, Vec::new(), Vec::new()); + chan.monitor_update_failed(false, false, Vec::new(), Vec::new(), Vec::new()); }, } } @@ -3431,7 +3566,16 @@ impl ChannelMana Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), }; if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { - return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false); + let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false); + if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { + // We weren't able to watch the channel to begin with, so no updates should be made on + // it. Previously, full_stack_target found an (unreachable) panic when the + // monitor update contained within `shutdown_finish` was applied. 
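// The `shutdown_finish.0.take()` below leans on Option::take to strip the monitor update out
// of the shutdown result in place: the error path still runs, but no update is applied to a
// monitor that was never successfully watched. A tiny std-only illustration of that pattern:
fn main() {
    let mut shutdown_finish: (Option<&str>, u32) = (Some("monitor update"), 0);
    let taken = shutdown_finish.0.take();
    assert_eq!(taken, Some("monitor update"));
    assert!(shutdown_finish.0.is_none()); // nothing left to apply later
}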
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish { + shutdown_finish.0.take(); + } + } + return res } funding_tx }, @@ -3504,7 +3648,7 @@ impl ChannelMana if let Some(monitor_update) = monitor_update { if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), chan_entry.key()); + handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, false, false, Vec::new(), Vec::new(), Vec::new(), chan_entry.key()); if is_permanent { remove_channel!(channel_state, chan_entry); break result; @@ -3574,7 +3718,7 @@ impl ChannelMana msg: update }); } - self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: msg.channel_id, reason: ClosureReason::CooperativeClosure }); + self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure); } Ok(()) } @@ -3791,26 +3935,36 @@ impl ChannelMana break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - let (commitment_update, pending_forwards, pending_failures, monitor_update, htlcs_to_fail_in) = - break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); - htlcs_to_fail = htlcs_to_fail_in; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + let raa_updates = break_chan_entry!(self, + chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); + htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; + if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) { if was_frozen_for_monitor { - assert!(commitment_update.is_none() && pending_forwards.is_empty() && pending_failures.is_empty()); + assert!(raa_updates.commitment_update.is_none()); + assert!(raa_updates.accepted_htlcs.is_empty()); + assert!(raa_updates.failed_htlcs.is_empty()); + assert!(raa_updates.finalized_claimed_htlcs.is_empty()); break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); } else { - if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures) { + if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, + RAACommitmentOrder::CommitmentFirst, false, + raa_updates.commitment_update.is_some(), + raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + raa_updates.finalized_claimed_htlcs) { break Err(e); } else { unreachable!(); } } } - if let Some(updates) = commitment_update { + if let Some(updates) = raa_updates.commitment_update { channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: counterparty_node_id.clone(), updates, }); } - break Ok((pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"), chan.get().get_funding_txo().unwrap())) + break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + chan.get().get_short_channel_id() + .expect("RAA should only work on a short-id-available channel"), + chan.get().get_funding_txo().unwrap())) 
}, hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -3970,7 +4124,8 @@ impl ChannelMana self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } }, - MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => { + MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | + MonitorEvent::UpdateFailed(funding_outpoint) => { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let by_id = &mut channel_state.by_id; @@ -3986,7 +4141,12 @@ impl ChannelMana msg: update }); } - self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::CommitmentTxConfirmed }); + let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event { + ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() } + } else { + ClosureReason::CommitmentTxConfirmed + }; + self.issue_channel_close_events(&chan, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { @@ -3995,6 +4155,9 @@ impl ChannelMana }); } }, + MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => { + self.channel_monitor_updated(&funding_txo, monitor_update_id); + }, } } @@ -4005,6 +4168,14 @@ impl ChannelMana has_pending_monitor_events } + /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without + /// handling all pending events (i.e. not PendingHTLCsForwardable). Thus, we expose monitor + /// update events as a separate process method here. + #[cfg(feature = "fuzztarget")] + pub fn process_monitor_events(&self) { + self.process_pending_monitor_events(); + } + /// Check the holding cell in each channel and free any pending HTLCs in them if possible. /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor /// update was applied. 
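// How the monitor-event plumbing above fits together, as a self-contained sketch:
// persistence completion is now reported as MonitorEvent::UpdateCompleted and routed to the
// (now private) channel_monitor_updated, while UpdateFailed closes the channel much like a
// confirmed commitment transaction, differing only in the ClosureReason. Stand-in types, not
// LDK's real enum.
enum MonitorEvent {
    HtlcEvent(&'static str),
    CommitmentTxConfirmed([u8; 32]),
    UpdateFailed([u8; 32]),
    UpdateCompleted { funding_txo: [u8; 32], monitor_update_id: u64 },
}

fn process(event: MonitorEvent) -> &'static str {
    match event {
        MonitorEvent::HtlcEvent(_) => "fail or claim the HTLC backwards",
        MonitorEvent::CommitmentTxConfirmed(_) | MonitorEvent::UpdateFailed(_) =>
            "force-close and emit ChannelClosed (reason differs per variant)",
        MonitorEvent::UpdateCompleted { .. } => "resume the channel via channel_monitor_updated",
    }
}

fn main() {
    let evt = MonitorEvent::UpdateCompleted { funding_txo: [0; 32], monitor_update_id: 3 };
    assert_eq!(process(evt), "resume the channel via channel_monitor_updated");
}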
@@ -4033,7 +4204,7 @@ impl ChannelMana if let Some((commitment_update, monitor_update)) = commitment_opt { if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { has_monitor_update = true; - let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id); + let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), Vec::new(), channel_id); handle_errors.push((chan.get_counterparty_node_id(), res)); if close_channel { return false; } } else { @@ -4102,12 +4273,7 @@ impl ChannelMana }); } - if let Ok(mut pending_events_lock) = self.pending_events.lock() { - pending_events_lock.push(events::Event::ChannelClosed { - channel_id: *channel_id, - reason: ClosureReason::CooperativeClosure - }); - } + self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure); log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transaction(&tx); @@ -4261,6 +4427,11 @@ impl ChannelMana self.process_pending_events(&event_handler); events.into_inner() } + + #[cfg(test)] + pub fn has_pending_payments(&self) -> bool { + !self.pending_outbound_payments.lock().unwrap().is_empty() + } } impl MessageSendEventsProvider for ChannelManager @@ -4436,6 +4607,16 @@ where payment_secrets.retain(|_, inbound_payment| { inbound_payment.expiry_time > header.time as u64 }); + + let mut outbounds = self.pending_outbound_payments.lock().unwrap(); + outbounds.retain(|_, payment| { + const PAYMENT_EXPIRY_BLOCKS: u32 = 3; + if payment.remaining_parts() != 0 { return true } + if let PendingOutboundPayment::Retryable { starting_block_height, .. 
} = payment { + return *starting_block_height + PAYMENT_EXPIRY_BLOCKS > height + } + true + }); } fn get_relevant_txids(&self) -> Vec { @@ -4529,7 +4710,7 @@ where msg: update }); } - self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: channel.channel_id(), reason: ClosureReason::CommitmentTxConfirmed }); + self.issue_channel_close_events(channel, ClosureReason::CommitmentTxConfirmed); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.get_counterparty_node_id(), action: msgs::ErrorAction::SendErrorMessage { msg: e }, @@ -4720,7 +4901,7 @@ impl msg: update }); } - self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer }); + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); false } else { true @@ -4735,7 +4916,7 @@ impl if let Some(short_id) = chan.get_short_channel_id() { short_to_id.remove(&short_id); } - self.pending_events.lock().unwrap().push(events::Event::ChannelClosed { channel_id: chan.channel_id(), reason: ClosureReason::DisconnectedPeer }); + self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer); return false; } else { no_channels_remain = false; @@ -5079,23 +5260,23 @@ impl Readable for HTLCSource { let mut session_priv: ::util::ser::OptionDeserWrapper = ::util::ser::OptionDeserWrapper(None); let mut first_hop_htlc_msat: u64 = 0; let mut path = Some(Vec::new()); - let mut mpp_id = None; + let mut payment_id = None; read_tlv_fields!(reader, { (0, session_priv, required), - (1, mpp_id, option), + (1, payment_id, option), (2, first_hop_htlc_msat, required), (4, path, vec_type), }); - if mpp_id.is_none() { - // For backwards compat, if there was no mpp_id written, use the session_priv bytes + if payment_id.is_none() { + // For backwards compat, if there was no payment_id written, use the session_priv bytes // instead. 
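// The compat rule at this step, as a self-contained sketch: serializations written before
// payment ids existed carried only the session_priv, so on read the payment id defaults to
// those same 32 bytes. Stand-in newtype, not the real PaymentId.
#[derive(Debug, PartialEq)]
struct PaymentId([u8; 32]);

fn payment_id_or_compat(read_payment_id: Option<[u8; 32]>, session_priv_bytes: [u8; 32]) -> PaymentId {
    PaymentId(read_payment_id.unwrap_or(session_priv_bytes))
}

fn main() {
    // Old serialization: no payment_id TLV present, fall back to the session_priv bytes.
    assert_eq!(payment_id_or_compat(None, [9; 32]), PaymentId([9; 32]));
    // New serialization: an explicit payment_id wins.
    assert_eq!(payment_id_or_compat(Some([1; 32]), [9; 32]), PaymentId([1; 32]));
}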
- mpp_id = Some(MppId(*session_priv.0.unwrap().as_ref())); + payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref())); } Ok(HTLCSource::OutboundRoute { session_priv: session_priv.0.unwrap(), first_hop_htlc_msat: first_hop_htlc_msat, path: path.unwrap(), - mpp_id: mpp_id.unwrap(), + payment_id: payment_id.unwrap(), }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), @@ -5107,12 +5288,12 @@ impl Readable for HTLCSource { impl Writeable for HTLCSource { fn write(&self, writer: &mut W) -> Result<(), ::io::Error> { match self { - HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, mpp_id } => { + HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => { 0u8.write(writer)?; - let mpp_id_opt = Some(mpp_id); + let payment_id_opt = Some(payment_id); write_tlv_fields!(writer, { (0, session_priv, required), - (1, mpp_id_opt, option), + (1, payment_id_opt, option), (2, first_hop_htlc_msat, required), (4, path, vec_type), }); @@ -5157,6 +5338,20 @@ impl_writeable_tlv_based!(PendingInboundPayment, { (8, min_value_msat, required), }); +impl_writeable_tlv_based_enum!(PendingOutboundPayment, + (0, Legacy) => { + (0, session_privs, required), + }, + (2, Retryable) => { + (0, session_privs, required), + (2, payment_hash, required), + (4, payment_secret, option), + (6, total_msat, required), + (8, pending_amt_msat, required), + (10, starting_block_height, required), + }, +;); + impl Writeable for ChannelManager where M::Target: chain::Watch, T::Target: BroadcasterInterface, @@ -5247,18 +5442,34 @@ impl Writeable f let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap(); // For backwards compat, write the session privs and their total length. let mut num_pending_outbounds_compat: u64 = 0; - for (_, outbounds) in pending_outbound_payments.iter() { - num_pending_outbounds_compat += outbounds.len() as u64; + for (_, outbound) in pending_outbound_payments.iter() { + num_pending_outbounds_compat += outbound.remaining_parts() as u64; } num_pending_outbounds_compat.write(writer)?; - for (_, outbounds) in pending_outbound_payments.iter() { - for outbound in outbounds.iter() { - outbound.write(writer)?; + for (_, outbound) in pending_outbound_payments.iter() { + match outbound { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + for session_priv in session_privs.iter() { + session_priv.write(writer)?; + } + } } } + // Encode without retry info for 0.0.101 compatibility. + let mut pending_outbound_payments_no_retry: HashMap> = HashMap::new(); + for (id, outbound) in pending_outbound_payments.iter() { + match outbound { + PendingOutboundPayment::Legacy { session_privs } | + PendingOutboundPayment::Retryable { session_privs, .. } => { + pending_outbound_payments_no_retry.insert(*id, session_privs.clone()); + } + } + } write_tlv_fields!(writer, { - (1, pending_outbound_payments, required), + (1, pending_outbound_payments_no_retry, required), + (3, pending_outbound_payments, required), }); Ok(()) @@ -5269,20 +5480,25 @@ impl Writeable f /// /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation /// is: -/// 1) Deserialize all stored ChannelMonitors. 
-/// 2) Deserialize the ChannelManager by filling in this struct and calling: -/// <(BlockHash, ChannelManager)>::read(reader, args) -/// This may result in closing some Channels if the ChannelMonitor is newer than the stored -/// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted. -/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same -/// way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and -/// ChannelMonitor::get_funding_txo(). -/// 4) Reconnect blocks on your ChannelMonitors. -/// 5) Disconnect/connect blocks on the ChannelManager. -/// 6) Move the ChannelMonitors into your local chain::Watch. +/// 1) Deserialize all stored [`ChannelMonitor`]s. +/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling: +/// `<(BlockHash, ChannelManager)>::read(reader, args)` +/// This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored +/// [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted. +/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the +/// same way you would handle a [`chain::Filter`] call using +/// [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`]. +/// 4) Reconnect blocks on your [`ChannelMonitor`]s. +/// 5) Disconnect/connect blocks on the [`ChannelManager`]. +/// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk. +/// Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you +/// will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in +/// the next step. +/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a +/// [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`]. /// -/// Note that the ordering of #4-6 is not of importance, however all three must occur before you -/// call any other methods on the newly-deserialized ChannelManager. +/// Note that the ordering of #4-7 is not of importance, however all four must occur before you +/// call any other methods on the newly-deserialized [`ChannelManager`]. /// /// Note that because some channels may be closed during deserialization, it is critical that you /// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to @@ -5290,6 +5506,8 @@ impl Writeable f /// broadcast), and then later deserialize a newer version of the same ChannelManager (which will /// not force-close the same channels but consider them live), you may end up revoking a state for /// which you've already broadcasted the transaction. 
+/// +/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> where M::Target: chain::Watch, T::Target: BroadcasterInterface, @@ -5429,6 +5647,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger); channel_closures.push(events::Event::ChannelClosed { channel_id: channel.channel_id(), + user_channel_id: channel.get_user_id(), reason: ClosureReason::OutdatedChannelManager }); } else { @@ -5496,6 +5715,16 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> None => continue, } } + if forward_htlcs_count > 0 { + // If we have pending HTLCs to forward, assume we either dropped a + // `PendingHTLCsForwardable` or the user received it but never processed it as they + // shut down before the timer hit. Either way, set the time_forwardable to a small + // constant as enough time has likely passed that we should simply handle the forwards + // now, or at least after the user gets a chance to reconnect to our peers. + pending_events_read.push(events::Event::PendingHTLCsForwardable { + time_forwardable: Duration::from_secs(2), + }); + } let background_event_count: u64 = Readable::read(reader)?; let mut pending_background_events_read: Vec = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::())); @@ -5518,21 +5747,33 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?; - let mut pending_outbound_payments_compat: HashMap> = + let mut pending_outbound_payments_compat: HashMap = HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32)); for _ in 0..pending_outbound_payments_count_compat { let session_priv = Readable::read(reader)?; - if pending_outbound_payments_compat.insert(MppId(session_priv), [session_priv].iter().cloned().collect()).is_some() { + let payment = PendingOutboundPayment::Legacy { + session_privs: [session_priv].iter().cloned().collect() + }; + if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() { return Err(DecodeError::InvalidValue) }; } + // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients. 
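// The three-way read fallback implemented below, with plain Options standing in for the TLV
// fields: prefer the full 0.0.102 map (TLV type 3), else upgrade the 0.0.101 no-retry map
// (TLV type 1) into Legacy entries, else use the count-prefixed pre-TLV data. A sketch with
// a trimmed-down enum, not the real deserializer.
use std::collections::{HashMap, HashSet};

enum PendingOutboundPayment { Legacy { session_privs: HashSet<[u8; 32]> } }

fn resolve(
    tlv3_full: Option<HashMap<u64, PendingOutboundPayment>>,
    tlv1_no_retry: Option<HashMap<u64, HashSet<[u8; 32]>>>,
    count_prefixed_compat: HashMap<u64, PendingOutboundPayment>,
) -> HashMap<u64, PendingOutboundPayment> {
    if let Some(full) = tlv3_full {
        full
    } else if let Some(no_retry) = tlv1_no_retry {
        no_retry.into_iter()
            .map(|(id, session_privs)| (id, PendingOutboundPayment::Legacy { session_privs }))
            .collect()
    } else {
        count_prefixed_compat
    }
}

fn main() {
    // Oldest on-disk format: neither TLV present, so the compat map is used as-is.
    let mut compat = HashMap::new();
    compat.insert(7u64, PendingOutboundPayment::Legacy { session_privs: HashSet::new() });
    assert_eq!(resolve(None, None, compat).len(), 1);
}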
@@ -5518,21 +5747,33 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 		}

 		let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
-		let mut pending_outbound_payments_compat: HashMap<MppId, HashSet<[u8; 32]>> =
+		let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
 			HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
 		for _ in 0..pending_outbound_payments_count_compat {
 			let session_priv = Readable::read(reader)?;
-			if pending_outbound_payments_compat.insert(MppId(session_priv), [session_priv].iter().cloned().collect()).is_some() {
+			let payment = PendingOutboundPayment::Legacy {
+				session_privs: [session_priv].iter().cloned().collect()
+			};
+			if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
 				return Err(DecodeError::InvalidValue)
 			};
 		}

+		// pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
+		let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
 		let mut pending_outbound_payments = None;
 		read_tlv_fields!(reader, {
-			(1, pending_outbound_payments, option),
+			(1, pending_outbound_payments_no_retry, option),
+			(3, pending_outbound_payments, option),
 		});
-		if pending_outbound_payments.is_none() {
+		if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
 			pending_outbound_payments = Some(pending_outbound_payments_compat);
+		} else if pending_outbound_payments.is_none() {
+			let mut outbounds = HashMap::new();
+			for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
+				outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
+			}
+			pending_outbound_payments = Some(outbounds);
 		}

 		let mut secp_ctx = Secp256k1::new();
@@ -5596,12 +5837,13 @@ mod tests {
 	use bitcoin::hashes::sha256::Hash as Sha256;
 	use core::time::Duration;
 	use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
-	use ln::channelmanager::{MppId, PaymentSendFailure};
+	use ln::channelmanager::{PaymentId, PaymentSendFailure};
 	use ln::features::{InitFeatures, InvoiceFeatures};
 	use ln::functional_test_utils::*;
 	use ln::msgs;
 	use ln::msgs::ChannelMessageHandler;
 	use routing::router::{get_keysend_route, get_route};
+	use routing::scorer::Scorer;
 	use util::errors::APIError;
 	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
 	use util::test_utils;
@@ -5741,17 +5983,14 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-		let logger = test_utils::TestLogger::new();

 		// First, send a partial MPP payment.
-		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
-		let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
-		let mpp_id = MppId([42; 32]);
+		let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
+		let payment_id = PaymentId([42; 32]);
 		// Use the utility function send_payment_along_path to send the payment with MPP data which
 		// indicates there are more HTLCs coming.
 		let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
-		nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
+		nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
 		check_added_monitors!(nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
@@ -5781,7 +6020,7 @@ mod tests {
 		expect_payment_failed!(nodes[0], our_payment_hash, true);

 		// Send the second half of the original MPP payment.
-		nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, mpp_id, &None).unwrap();
+		nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
 		check_added_monitors!(nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
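Returning to the deserialization logic above: the three serialization vintages are resolved with a strict precedence. The retry-capable TLV (type 3) wins when present, the 0.0.101 no-retry TLV (type 1) is otherwise upgraded into `Legacy` entries, and only then does the pre-TLV fixed field apply. A self-contained sketch of that precedence, using simplified stand-ins rather than the real LDK types:

```rust
use std::collections::{HashMap, HashSet};

// Simplified stand-ins, just to make the fallback order explicit.
#[derive(Hash, PartialEq, Eq)]
struct PaymentId([u8; 32]);
enum PendingOutboundPayment {
	Legacy { session_privs: HashSet<[u8; 32]> },
}

fn resolve(
	compat: HashMap<PaymentId, PendingOutboundPayment>,            // pre-TLV fixed field
	no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>>,       // TLV type 1 (0.0.101)
	retryable: Option<HashMap<PaymentId, PendingOutboundPayment>>, // TLV type 3 (0.0.102+)
) -> HashMap<PaymentId, PendingOutboundPayment> {
	if let Some(outbounds) = retryable {
		outbounds // the newest format wins when present
	} else if let Some(mut no_retry) = no_retry {
		// 0.0.101 wrote only session_privs; wrap each set in the Legacy variant.
		no_retry.drain()
			.map(|(id, session_privs)| (id, PendingOutboundPayment::Legacy { session_privs }))
			.collect()
	} else {
		compat // oldest format: one single-element set per session_priv
	}
}
```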
@@ -5823,8 +6062,9 @@ mod tests {
 		// further events will be generated for subsequent path successes.
 		let events = nodes[0].node.get_and_clear_pending_events();
 		match events[0] {
-			Event::PaymentSent { payment_preimage: ref preimage } => {
+			Event::PaymentSent { payment_preimage: ref preimage, payment_hash: ref hash } => {
 				assert_eq!(payment_preimage, *preimage);
+				assert_eq!(our_payment_hash, *hash);
 			},
 			_ => panic!("Unexpected event"),
 		}
@@ -5842,13 +6082,14 @@ mod tests {
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 		let logger = test_utils::TestLogger::new();
+		let scorer = Scorer::new(0);

 		// To start (1), send a regular payment but don't claim it.
 		let expected_route = [&nodes[1]];
 		let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);

 		// Next, attempt a keysend payment and make sure it fails.
-		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger, &scorer).unwrap();
 		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
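Since `PaymentSent` now carries the `payment_hash` alongside the preimage, an event consumer can key its bookkeeping directly on the hash instead of recomputing it. A sketch of the consuming side, where `pending_payments` is an assumed application-side map keyed by `PaymentHash` and `event` is owned:

```rust
use bitcoin::hashes::{Hash, sha256::Hash as Sha256};

// `event` is an owned events::Event; `pending_payments` is application state, not LDK's.
match event {
	Event::PaymentSent { payment_preimage, payment_hash } => {
		// The hash is always SHA256(preimage); carrying it in the event saves recomputing it.
		debug_assert_eq!(payment_hash, PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()));
		pending_payments.remove(&payment_hash);
	},
	_ => { /* other events */ },
}
```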
@@ -5876,8 +6117,8 @@ mod tests {
 		// To start (2), send a keysend payment but don't claim it.
 		let payment_preimage = PaymentPreimage([42; 32]);
-		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
-		let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger, &scorer).unwrap();
+		let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 		check_added_monitors!(nodes[0], 1);
 		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 1);
@@ -5930,13 +6171,14 @@ mod tests {
 		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
 		let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
 		let first_hops = nodes[0].node.list_usable_channels();
+		let scorer = Scorer::new(0);
 		let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
 		                              Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
-		                              nodes[0].logger).unwrap();
+		                              nodes[0].logger, &scorer).unwrap();

 		let test_preimage = PaymentPreimage([42; 32]);
 		let mismatch_payment_hash = PaymentHash([43; 32]);
-		let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap();
+		let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
 		check_added_monitors!(nodes[0], 1);

 		let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -5966,14 +6208,15 @@ mod tests {
 		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
 		let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
 		let first_hops = nodes[0].node.list_usable_channels();
+		let scorer = Scorer::new(0);
 		let route = get_keysend_route(&payer_pubkey, network_graph, &payee_pubkey,
 		                              Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
-		                              nodes[0].logger).unwrap();
+		                              nodes[0].logger, &scorer).unwrap();

 		let test_preimage = PaymentPreimage([42; 32]);
 		let test_secret = PaymentSecret([43; 32]);
 		let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
-		let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap();
+		let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
 		check_added_monitors!(nodes[0], 1);

 		let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
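The caller-visible change in the keysend path is the return type: `send_spontaneous_payment` now returns the `PaymentId` alongside the `PaymentHash`, mirroring `send_payment`. A sketch of an updated caller, with `route` and `channel_manager` assumed from the surrounding setup:

```rust
// For keysend the sender picks the preimage, so the hash is derived from it rather than
// taken from an invoice. The returned PaymentId is the local handle for this payment.
let preimage = PaymentPreimage([42; 32]);
let (payment_hash, payment_id) =
	channel_manager.send_spontaneous_payment(&route, Some(preimage))
		.expect("keysend send failed");
// payment_hash == SHA256(preimage), which is what the recipient will claim against.
println!("keysend in flight: hash {:?}, id {:?}", payment_hash, payment_id);
```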
@@ -5998,12 +6241,9 @@ mod tests {
 		let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 		let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
 		let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
-		let logger = test_utils::TestLogger::new();

 		// Marshall an MPP route.
-		let (_, payment_hash, _) = get_payment_preimage_hash!(&nodes[3]);
-		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph, &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+		let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
 		let path = route.paths[0].clone();
 		route.paths.push(path);
 		route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
@@ -6024,8 +6264,7 @@ mod tests {
 #[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
 pub mod bench {
 	use chain::Listen;
-	use chain::chainmonitor::ChainMonitor;
-	use chain::channelmonitor::Persist;
+	use chain::chainmonitor::{ChainMonitor, Persist};
 	use chain::keysinterface::{KeysManager, InMemorySigner};
 	use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
 	use ln::features::{InitFeatures, InvoiceFeatures};
@@ -6033,6 +6272,7 @@ pub mod bench {
 	use ln::msgs::{ChannelMessageHandler, Init};
 	use routing::network_graph::NetworkGraph;
 	use routing::router::get_route;
+	use routing::scorer::Scorer;
 	use util::test_utils;
 	use util::config::UserConfig;
 	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
@@ -6140,8 +6380,9 @@ pub mod bench {
 	macro_rules! send_payment {
 		($node_a: expr, $node_b: expr) => {
 			let usable_channels = $node_a.list_usable_channels();
+			let scorer = Scorer::new(0);
 			let route = get_route(&$node_a.get_our_node_id(), &dummy_graph, &$node_b.get_our_node_id(), Some(InvoiceFeatures::known()),
-				Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a).unwrap();
+				Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a, &scorer).unwrap();

 			let mut payment_preimage = PaymentPreimage([0; 32]);
 			payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
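Finally, the recurring `Scorer::new(0)` pattern above exists because `get_route` and `get_keysend_route` now take a scorer as their last argument. A zero base penalty makes scoring a no-op, which is why the tests and the bench are otherwise unchanged; a sketch of a non-test caller, with `payer`, `payee`, `network_graph`, `first_hops` and `logger` assumed in scope:

```rust
use routing::scorer::Scorer;

// One long-lived scorer; the constructor argument is the base per-channel penalty in
// msats (0 here, matching the tests above, which makes scoring a no-op).
let scorer = Scorer::new(0);
let route = get_route(
	&payer, &network_graph, &payee, Some(InvoiceFeatures::known()),
	Some(&first_hops.iter().collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV,
	&logger, &scorer).unwrap();
```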