#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
-macro_rules! secp_call {
- ( $res: expr, $err: expr ) => {
- match $res {
- Ok(key) => key,
- Err(_) => return Err($err),
- }
- };
-}
-
/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
pub struct ChannelDetails {
/// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
pub is_live: bool,
}
+/// If a payment fails to send, it can be in one of several states. This enum is returned as the
+/// Err() type describing which state the payment is in; see the description of the individual
+/// enum states for more.
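+///
+/// ```ignore
+/// // Illustrative sketch only (not part of this change): deciding whether a
+/// // failed send may be retried. `chan_mgr`, `route`, and `hash` are
+/// // hypothetical bindings.
+/// match chan_mgr.send_payment(&route, hash, &None) {
+///     Ok(()) => {},
+///     // No state was changed; fix the parameter(s) and retry freely.
+///     Err(PaymentSendFailure::ParameterError(_)) |
+///     Err(PaymentSendFailure::PathParameterError(_)) => {},
+///     // Nothing was sent; retry in full, ideally over different paths.
+///     Err(PaymentSendFailure::AllFailedRetrySafe(_)) => {},
+///     // Some HTLCs may be irrevocably committed; do NOT retry in full.
+///     Err(PaymentSendFailure::PartialFailure(_)) => {},
+/// }
+/// ```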
+#[derive(Debug)]
+pub enum PaymentSendFailure {
+ /// A parameter which was passed to send_payment was invalid, preventing us from attempting to
+ /// send the payment at all. No channel state has been changed or messages sent to peers, and
+ /// once you've fixed the offending parameter, you can freely retry the payment in full.
+ ParameterError(APIError),
+ /// A parameter in a single path which was passed to send_payment was invalid, preventing us
+ /// from attempting to send the payment at all. No channel state has been changed or messages
+ /// sent to peers, and once you've fixed the offending parameter, you can freely retry the
+ /// payment in full.
+ ///
+ /// The results here are ordered the same as the paths in the route object which was passed to
+ /// send_payment.
+ PathParameterError(Vec<Result<(), APIError>>),
+ /// All paths which were attempted failed to send, with no channel state change taking place.
+ /// You can freely retry the payment in full (though you probably want to do so over different
+ /// paths than the ones selected).
+ AllFailedRetrySafe(Vec<APIError>),
+ /// Some paths which were attempted failed to send, though possibly not all. At least some
+ /// paths have irrevocably committed to the HTLC and retrying the payment in full would result
+ /// in over-/re-payment.
+ ///
+ /// The results here are ordered the same as the paths in the route object which was passed to
+ /// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
+ /// retried (though there is currently no API with which to do so).
+ ///
+ /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
+ /// as they will result in over-/re-payment. These HTLCs were all either successfully sent (in
+ /// the case of Ok(())) or will be sent once channel_monitor_updated is called on the next-hop
+ /// channel with the latest update_id.
+ PartialFailure(Vec<Result<(), APIError>>),
+}
+
macro_rules! handle_error {
($self: ident, $internal: expr, $their_node_id: expr) => {
match $internal {
/// payment_preimage tracking (which you should already be doing as they represent "proof of
/// payment") and prevent double-sends yourself.
///
- /// May generate a SendHTLCs message event on success, which should be relayed.
+ /// May generate one or more SendHTLCs message events on success, which should be relayed.
///
- /// Raises APIError::RoutError when invalid route or forward parameter
- /// (cltv_delta, fee, node public key) is specified.
- /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
- /// (including due to previous monitor update failure or new permanent monitor update failure).
- /// Raised APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
- /// relevant updates.
+ /// Each path may have a different return value, and PaymentSendFailure may contain a Vec with
+ /// each entry matching the corresponding-index entry in the route paths; see
+ /// PaymentSendFailure for more info.
///
- /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
- /// and you may wish to retry via a different route immediately.
- /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
- /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
- /// the payment via a different route unless you intend to pay twice!
+ /// In general, a path may raise:
+ /// * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee,
+ /// node public key) is specified.
+ /// * APIError::ChannelUnavailable if the next-hop channel is not available for updates
+ /// (including due to previous monitor update failure or new permanent monitor update
+ /// failure).
+ /// * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+ /// relevant updates.
+ ///
+ /// Note that depending on the type of the PaymentSendFailure the HTLC may have been
+ /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
+ /// different route unless you intend to pay twice!
///
/// payment_secret is unrelated to payment_hash (or PaymentPreimage) and exists to authenticate
/// the sender to the recipient and prevent payment-probing (deanonymization) attacks. For
/// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
/// bit set (either as required or as available). If multiple paths are present in the Route,
/// we assume the invoice had the basic_mpp feature set.
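+ ///
+ /// ```ignore
+ /// // Illustrative sketch only (not part of this change): a multi-path send
+ /// // authenticated by a payment_secret, assuming hypothetical `route`,
+ /// // `hash`, and `secret` bindings taken from the invoice.
+ /// if let Err(e) = chan_mgr.send_payment(&route, hash, &Some(secret)) {
+ ///     // Inspect `e` (a PaymentSendFailure) to decide whether retrying is safe.
+ /// }
+ /// ```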
- pub fn send_payment(&self, route: Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), APIError> {
- if route.paths.len() < 1 || route.paths.len() > 1 {
- return Err(APIError::RouteError{err: "We currently don't support MPP, and we need at least one path"});
+ pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> {
+ if route.paths.len() < 1 {
+ return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
}
- if route.paths[0].len() < 1 || route.paths[0].len() > 20 {
- return Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"});
+ if route.paths.len() > 10 {
+ // This limit is completely arbitrary - there aren't any real fundamental path-count
+ // limits. After we support retrying individual paths we should likely bump this, but
+ // for now more than 10 paths likely carries too much risk of one-path failure.
+ return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
}
+ let mut total_value = 0;
let our_node_id = self.get_our_node_id();
- for (idx, hop) in route.paths[0].iter().enumerate() {
- if idx != route.paths[0].len() - 1 && hop.pubkey == our_node_id {
- return Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"});
+ let mut path_errs = Vec::with_capacity(route.paths.len());
+ 'path_check: for path in route.paths.iter() {
+ if path.len() < 1 || path.len() > 20 {
+ path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
+ continue 'path_check;
+ }
+ for (idx, hop) in path.iter().enumerate() {
+ if idx != path.len() - 1 && hop.pubkey == our_node_id {
+ path_errs.push(Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"}));
+ continue 'path_check;
+ }
}
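+ // For the final hop of a path, fee_msat holds the amount delivered to the
+ // recipient, so summing the last hop across paths yields the payment total.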
+ total_value += path.last().unwrap().fee_msat;
+ path_errs.push(Ok(()));
+ }
+ if path_errs.iter().any(|e| e.is_err()) {
+ return Err(PaymentSendFailure::PathParameterError(path_errs));
}
-
- let (session_priv, prng_seed) = self.keys_manager.get_onion_rand();
let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let mut results = Vec::new();
+ 'path_loop: for path in route.paths.iter() {
+ macro_rules! check_res_push {
+ ($res: expr) => { match $res {
+ Ok(r) => r,
+ Err(e) => {
+ results.push(Err(e));
+ continue 'path_loop;
+ },
+ }
+ }
+ }
- let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route.paths[0], &session_priv),
- APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
- let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route.paths[0], payment_secret, cur_height)?;
- if onion_utils::route_size_insane(&onion_payloads) {
- return Err(APIError::RouteError{err: "Route size too large considering onion data"});
- }
- let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash);
+ log_trace!(self, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
+ let (session_priv, prng_seed) = self.keys_manager.get_onion_rand();
- let _ = self.total_consistency_lock.read().unwrap();
+ let onion_keys = check_res_push!(onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
+ .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"}));
+ let (onion_payloads, htlc_msat, htlc_cltv) = check_res_push!(onion_utils::build_onion_payloads(&path, total_value, payment_secret, cur_height));
+ if onion_utils::route_size_insane(&onion_payloads) {
+ check_res_push!(Err(APIError::RouteError{err: "Route size too large considering onion data"}));
+ }
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash);
- let err: Result<(), _> = loop {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let id = match channel_lock.short_to_id.get(&route.paths[0].first().unwrap().short_channel_id) {
- None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
- Some(id) => id.clone(),
- };
+ let _ = self.total_consistency_lock.read().unwrap();
- let channel_state = &mut *channel_lock;
- if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
- match {
- if chan.get().get_their_node_id() != route.paths[0].first().unwrap().pubkey {
- return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
- }
- if !chan.get().is_live() {
- return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
- }
- break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- path: route.paths[0].clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- }, onion_packet), channel_state, chan)
- } {
- Some((update_add, commitment_signed, monitor_update)) => {
- if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
- maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
- // Note that MonitorUpdateFailed here indicates (per function docs)
- // that we will resent the commitment update once we unfree monitor
- // updating, so we have to take special care that we don't return
- // something else in case we will resend later!
- return Err(APIError::MonitorUpdateFailed);
+ let err: Result<(), _> = loop {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
+ None => check_res_push!(Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"})),
+ Some(id) => id.clone(),
+ };
+
+ let channel_state = &mut *channel_lock;
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+ match {
+ if chan.get().get_their_node_id() != path.first().unwrap().pubkey {
+ check_res_push!(Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}));
}
+ if !chan.get().is_live() {
+ check_res_push!(Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"}));
+ }
+ break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ }, onion_packet), channel_state, chan)
+ } {
+ Some((update_add, commitment_signed, monitor_update)) => {
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
+ // Note that MonitorUpdateFailed here indicates (per function docs)
+ // that we will resend the commitment update once monitor updating
+ // is restored. Therefore, we must return an error indicating that
+ // it is unsafe to retry the payment wholesale, which we do in the
+ // next check for MonitorUpdateFailed, below.
+ check_res_push!(Err(APIError::MonitorUpdateFailed));
+ }
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: route.paths[0].first().unwrap().pubkey,
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: vec![update_add],
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- },
- });
- },
- None => {},
- }
- } else { unreachable!(); }
- return Ok(());
- };
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: path.first().unwrap().pubkey,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: vec![update_add],
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ },
+ });
+ },
+ None => {},
+ }
+ } else { unreachable!(); }
+ results.push(Ok(()));
+ continue 'path_loop;
+ };
- match handle_error!(self, err, route.paths[0].first().unwrap().pubkey) {
- Ok(_) => unreachable!(),
- Err(e) => { Err(APIError::ChannelUnavailable { err: e.err }) }
+ match handle_error!(self, err, path.first().unwrap().pubkey) {
+ Ok(_) => unreachable!(),
+ Err(e) => {
+ check_res_push!(Err(APIError::ChannelUnavailable { err: e.err }));
+ },
+ }
+ }
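+ // Aggregate the per-path results: if every path failed and none of the
+ // failures was a monitor update failure, the payment as a whole is safe to
+ // retry; if any path succeeded or a monitor update failed, report a
+ // PartialFailure instead.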
+ let mut has_ok = false;
+ let mut has_err = false;
+ for res in results.iter() {
+ if res.is_ok() { has_ok = true; }
+ if res.is_err() { has_err = true; }
+ if let &Err(APIError::MonitorUpdateFailed) = res {
+ // MonitorUpdateFailed is inherently unsafe to retry, so we call it a
+ // PartialFailure.
+ has_err = true;
+ has_ok = true;
+ break;
+ }
+ }
+ if has_err && has_ok {
+ Err(PaymentSendFailure::PartialFailure(results))
+ } else if has_err {
+ Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
+ } else {
+ Ok(())
}
}
/// privacy-breaking recipient-probing attacks which may reveal payment activity to
/// motivated attackers.
///
+ /// Note that the privacy concerns in (b) are not relevant in payments with a payment_secret
+ /// set. Thus, for such payments we will claim any payment which does not under-pay.
+ ///
/// May panic if called except in response to a PaymentReceived event.
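+ ///
+ /// ```ignore
+ /// // Illustrative sketch only (not part of this change): claiming an incoming
+ /// // payment in response to a PaymentReceived event, assuming hypothetical
+ /// // `preimage`, `secret`, and `amt_msat` bindings taken from the event.
+ /// let claimed: bool = chan_mgr.claim_funds(preimage, &secret, amt_msat);
+ /// ```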
pub fn claim_funds(&self, payment_preimage: PaymentPreimage, payment_secret: &Option<PaymentSecret>, expected_amount: u64) -> bool {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(payment_hash, *payment_secret));
if let Some(mut sources) = removed_source {
assert!(!sources.is_empty());
- let valid_mpp_amount = if let &Some(ref data) = &sources[0].payment_data {
+
+ // If we are claiming an MPP payment, we have to take special care to ensure that each
+ // channel exists before claiming all of the payments (inside one lock).
+ // Note that channel existance is sufficient as we should always get a monitor update
+ // which will take care of the real HTLC claim enforcement.
+ //
+ // If we find an HTLC which we would need to claim but for which we do not have a
+ // channel, we will fail all parts of the MPP payment. While we could wait and see if
+ // the sender retries the already-failed path(s), it should be a pretty rare case where
+ // we got all the HTLCs and then a channel closed while we were waiting for the user to
+ // provide the preimage, so worrying too much about the optimal handling isn't worth
+ // it.
+
+ let (is_mpp, mut valid_mpp) = if let &Some(ref data) = &sources[0].payment_data {
assert!(payment_secret.is_some());
- data.total_msat == expected_amount
+ (true, data.total_msat >= expected_amount)
} else {
assert!(payment_secret.is_none());
- false
+ (false, false)
};
+ for htlc in sources.iter() {
+ if !is_mpp || !valid_mpp { break; }
+ if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
+ valid_mpp = false;
+ }
+ }
+
+ let mut errs = Vec::new();
let mut claimed_any_htlcs = false;
for htlc in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- if !valid_mpp_amount && (htlc.value < expected_amount || htlc.value > expected_amount * 2) {
+ if (is_mpp && !valid_mpp) || (!is_mpp && (htlc.value < expected_amount || htlc.value > expected_amount * 2)) {
let mut htlc_msat_data = byte_utils::be64_to_array(htlc.value).to_vec();
let mut height_data = byte_utils::be32_to_array(self.latest_block_height.load(Ordering::Acquire) as u32).to_vec();
htlc_msat_data.append(&mut height_data);
HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_data });
} else {
- self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc.prev_hop), payment_preimage);
- claimed_any_htlcs = true;
+ match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
+ Err(Some(e)) => {
+ if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+ // We got a temporary failure updating the monitor, but will claim
+ // the HTLC once monitor updating is restored (or on chain).
+ log_error!(self, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+ claimed_any_htlcs = true;
+ } else { errs.push(e); }
+ },
+ Err(None) if is_mpp => unreachable!("We already checked for channel existence, we can't fail here!"),
+ Err(None) => {
+ log_warn!(self, "Channel we expected to claim an HTLC from was closed.");
+ },
+ Ok(()) => claimed_any_htlcs = true,
+ }
}
}
+
+ // Now that we've done the entire above loop in one lock, we can handle any errors
+ // which were generated.
+ channel_state.take();
+
+ for (their_node_id, err) in errs.drain(..) {
+ let res: Result<(), _> = Err(err);
+ let _ = handle_error!(self, res, their_node_id);
+ }
+
claimed_any_htlcs
} else { false }
}
- fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<ChanSigner>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
- let (their_node_id, err) = loop {
- match source {
- HTLCSource::OutboundRoute { .. } => {
- mem::drop(channel_state_lock);
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentSent {
- payment_preimage
- });
- },
- HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
- //TODO: Delay the claimed_funds relaying just like we do outbound relay!
- let channel_state = &mut *channel_state_lock;
- let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
- Some(chan_id) => chan_id.clone(),
- None => {
- // TODO: There is probably a channel manager somewhere that needs to
- // learn the preimage as the channel already hit the chain and that's
- // why it's missing.
- return
- }
- };
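+ /// Attempts to claim a single HTLC, given its previous-hop data, within the
+ /// given channel-state lock. Returns Ok(()) if the claim was (or will be)
+ /// committed, Err(None) if there is no live channel to claim from (the
+ /// preimage must instead reach the channel via its monitor or on-chain), and
+ /// Err(Some((node_id, err))) for an error the caller must handle after
+ /// dropping the lock.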
+ fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<ChanSigner>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+ //TODO: Delay the claimed_funds relaying just like we do outbound relay!
+ let channel_state = &mut **channel_state_lock;
+ let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
+ Some(chan_id) => chan_id.clone(),
+ None => {
+ return Err(None)
+ }
+ };
- if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
- let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
- match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
- Ok((msgs, monitor_option)) => {
- if let Some(monitor_update) = monitor_option {
- if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
- if was_frozen_for_monitor {
- assert!(msgs.is_none());
- } else {
- break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()));
- }
- }
- }
- if let Some((msg, commitment_signed)) = msgs {
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_their_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: vec![msg],
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- }
- });
- }
- },
- Err(_e) => {
- // TODO: There is probably a channel manager somewhere that needs to
- // learn the preimage as the channel may be about to hit the chain.
- //TODO: Do something with e?
- return
- },
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+ let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+ match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage) {
+ Ok((msgs, monitor_option)) => {
+ if let Some(monitor_update) = monitor_option {
+ if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ if was_frozen_for_monitor {
+ assert!(msgs.is_none());
+ } else {
+ return Err(Some((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
+ }
}
- } else { unreachable!(); }
+ }
+ if let Some((msg, commitment_signed)) = msgs {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_their_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: vec![msg],
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ }
+ });
+ }
+ return Ok(())
+ },
+ Err(e) => {
+ // TODO: Do something with e?
+ // This should only occur if we are claiming an HTLC at the same time as the
+ // HTLC is being failed (eg because a block is being connected and this caused
+ // an HTLC to time out). This should, of course, only occur if the user is the
+ // one doing the claiming (as it being a part of a peer claim would imply we're
+ // about to lose funds) and only if the lock in claim_funds was dropped as a
+ // previous HTLC was failed (thus not for an MPP payment).
+ debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e);
+ return Err(None)
},
}
- return;
- };
+ } else { unreachable!(); }
+ }
- mem::drop(channel_state_lock);
- let _ = handle_error!(self, err, their_node_id);
+ fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<ChanSigner>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+ match source {
+ HTLCSource::OutboundRoute { .. } => {
+ mem::drop(channel_state_lock);
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PaymentSent {
+ payment_preimage
+ });
+ },
+ HTLCSource::PreviousHopData(hop_data) => {
+ if let Err((their_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
+ Ok(()) => Ok(()),
+ Err(None) => {
+ // TODO: There is probably a channel monitor somewhere that needs to
+ // learn the preimage as the channel already hit the chain and that's
+ // why it's missing.
+ Ok(())
+ },
+ Err(Some(res)) => Err(res),
+ } {
+ mem::drop(channel_state_lock);
+ let res: Result<(), _> = Err(err);
+ let _ = handle_error!(self, res, their_node_id);
+ }
+ },
+ }
}
/// Gets the node_id held by this ChannelManager