})
}
- fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
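+ /// Decodes the onion of an incoming `update_add_htlc` and, for HTLCs we are meant to forward,
+ /// runs the checks that need the *outbound* channel's state (which we can't access later while
+ /// holding the inbound peer's state lock). On success, returns the decoded hop and the onion
+ /// shared secret; on failure, returns the failure message to send back to the sender.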
+ fn decode_update_add_htlc_onion(
+ &self, msg: &msgs::UpdateAddHTLC
+ ) -> Result<(onion_utils::Hop, [u8; 32]), HTLCFailureMsg> {
macro_rules! return_malformed_err {
($msg: expr, $err_code: expr) => {
{
log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ return Err(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
($msg: expr, $err_code: expr, $data: expr) => {
{
log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ return Err(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
channel_id: msg.channel_id,
htlc_id: msg.htlc_id,
reason: HTLCFailReason::reason($err_code, $data.to_vec())
return_err!(err_msg, err_code, &[0; 0]);
},
};
+ let (outgoing_scid, outgoing_amt_msat, outgoing_cltv_value) = match next_hop {
+ onion_utils::Hop::Forward {
+ next_hop_data: msgs::OnionHopData {
+ format: msgs::OnionHopDataFormat::NonFinalNode { short_channel_id }, amt_to_forward,
+ outgoing_cltv_value,
+ }, ..
+ } => (short_channel_id, amt_to_forward, outgoing_cltv_value),
+ // We'll do receive checks in [`Self::construct_pending_htlc_status`] so we have access to the
+ // inbound channel's state.
+ onion_utils::Hop::Receive { .. } => return Ok((next_hop, shared_secret)),
+ onion_utils::Hop::Forward {
+ next_hop_data: msgs::OnionHopData { format: msgs::OnionHopDataFormat::FinalNode { .. }, .. }, ..
+ } => {
+ return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0; 0]);
+ }
+ };
- let pending_forward_info = match next_hop {
+ // Perform outbound checks here instead of in [`Self::construct_pending_htlc_status`] because we
+ // can't hold the outbound peer state lock at the same time as the inbound peer state lock.
+ if let Some((err, mut code, chan_update)) = loop {
+ let id_option = self.short_to_chan_info.read().unwrap().get(&outgoing_scid).cloned();
+ let forwarding_chan_info_opt = match id_option {
+ None => { // unknown_next_peer
+ // Note that this is likely a timing oracle for detecting whether an scid is a
+ // phantom or an intercept.
+ if (self.default_configuration.accept_intercept_htlcs &&
+ fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)) ||
+ fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.genesis_hash)
+ {
+ None
+ } else {
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ }
+ },
+ Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
+ };
+ let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() {
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ }
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
+ None => {
+ // Channel was removed. The short_to_chan_info and channel_by_id maps
+ // have no consistency guarantees.
+ break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
+ },
+ Some(chan) => chan
+ };
+ if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
+ // Note that the behavior here should be identical to the above block - we
+ // should NOT reveal the existence or non-existence of a private channel if
+ // we don't allow forwards outbound over them.
+ break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
+ }
+ if chan.context.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
+ // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
+ // "refuse to forward unless the SCID alias was used", so we pretend
+ // we don't have the channel here.
+ break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
+ }
+ let chan_update_opt = self.get_channel_update_for_onion(outgoing_scid, chan).ok();
+
+ // Note that we could technically not return an error yet here and just hope
+ // that the connection is reestablished or monitor updated by the time we get
+ // around to doing the actual forward, but better to fail early if we can and
+ // hopefully an attacker trying to path-trace payments cannot make this occur
+ // on a small/per-node/per-channel scale.
+ if !chan.context.is_live() { // channel_disabled
+ // If the channel_update we're going to return is disabled (i.e. the
+ // peer has been disabled for some time), return `channel_disabled`,
+ // otherwise return `temporary_channel_failure`.
+ if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
+ break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
+ } else {
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
+ }
+ }
+ if outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+ break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
+ }
+ if let Err((err, code)) = chan.htlc_satisfies_config(&msg, outgoing_amt_msat, outgoing_cltv_value) {
+ break Some((err, code, chan_update_opt));
+ }
+ chan_update_opt
+ } else {
+ if (msg.cltv_expiry as u64) < (outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
+ // We really should set `incorrect_cltv_expiry` here but as we're not
+ // forwarding over a real channel we can't generate a channel_update
+ // for it. Instead we just return a generic temporary_node_failure.
+ break Some((
+ "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+ 0x2000 | 2, None,
+ ));
+ }
+ None
+ };
+
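+ // The expiry checks below apply whether or not we found a real outbound channel above
+ // (i.e. also for phantom and intercept SCIDs).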
+ let cur_height = self.best_block.read().unwrap().height() + 1;
+ // Theoretically, our channel counterparty shouldn't send us an HTLC expiring now,
+ // but we want to be robust wrt counterparty packet sanitization (see
+ // HTLC_FAIL_BACK_BUFFER rationale).
+ if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
+ break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
+ }
+ if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+ break Some(("CLTV expiry is too far in the future", 21, None));
+ }
+ // If the HTLC expires ~now, don't bother trying to forward it to our
+ // counterparty. They should fail it anyway, but we don't want to bother with
+ // the round-trips or risk them deciding they definitely want the HTLC and
+ // force-closing to ensure they get it if we're offline.
+ // We previously had a much more aggressive check here which tried to ensure
+ // our counterparty receives an HTLC which has *our* risk threshold met on it,
+ // but there is no need to do that, and since we're a bit conservative with our
+ // risk threshold it just results in failing to forward payments.
+ if (outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+ break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
+ }
+
+ break None;
+ }
+ {
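+ // One of the checks above failed: serialize the per-code data (HTLC amount, CLTV expiry,
+ // or a placeholder flags field) followed by the length-prefixed `channel_update`, if we
+ // could generate one, as the failure packet data.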
+ let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
+ if let Some(chan_update) = chan_update {
+ if code == 0x1000 | 11 || code == 0x1000 | 12 {
+ msg.amount_msat.write(&mut res).expect("Writes cannot fail");
+ }
+ else if code == 0x1000 | 13 {
+ msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
+ }
+ else if code == 0x1000 | 20 {
+ // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+ 0u16.write(&mut res).expect("Writes cannot fail");
+ }
+ (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
+ msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
+ chan_update.write(&mut res).expect("Writes cannot fail");
+ } else if code & 0x1000 == 0x1000 {
+ // If we're trying to return an error that requires a `channel_update` but
+ // we're forwarding to a phantom or intercept "channel" (i.e. cannot
+ // generate an update), just use the generic "temporary_node_failure"
+ // instead.
+ code = 0x2000 | 2;
+ }
+ return_err!(err, code, &res.0[..]);
+ }
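+ // All outbound forwarding checks passed (or this HTLC is destined for us); hand the decoded
+ // hop back so the caller can finish building the pending HTLC under the inbound channel's lock.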
+ Ok((next_hop, shared_secret))
+ }
+
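+ /// Constructs the [`PendingHTLCStatus`] for an HTLC from its already-decoded onion hop data
+ /// and onion shared secret. The outbound forwarding checks were run earlier in
+ /// [`Self::decode_update_add_htlc_onion`]; the remaining (receive-side) checks happen here,
+ /// while the inbound channel's peer state lock is held.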
+ fn construct_pending_htlc_status<'a>(
+ &self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32], decoded_hop: onion_utils::Hop,
+ ) -> PendingHTLCStatus {
+ macro_rules! return_err {
+ ($msg: expr, $err_code: expr, $data: expr) => {
+ {
+ log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+ return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason: HTLCFailReason::reason($err_code, $data.to_vec())
+ .get_encrypted_failure_packet(&shared_secret, &None),
+ }));
+ }
+ }
+ }
+ match decoded_hop {
onion_utils::Hop::Receive(next_hop_data) => {
// OUR PAYMENT!
match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None) {
outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
})
}
- };
-
- if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
- // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
- // with a short_channel_id of 0. This is important as various things later assume
- // short_channel_id is non-0 in any ::Forward.
- if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
- if let Some((err, mut code, chan_update)) = loop {
- let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
- let forwarding_chan_info_opt = match id_option {
- None => { // unknown_next_peer
- // Note that this is likely a timing oracle for detecting whether an scid is a
- // phantom or an intercept.
- if (self.default_configuration.accept_intercept_htlcs &&
- fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) ||
- fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
- {
- None
- } else {
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- }
- },
- Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
- };
- let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
- if peer_state_mutex_opt.is_none() {
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- }
- let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
- None => {
- // Channel was removed. The short_to_chan_info and channel_by_id maps
- // have no consistency guarantees.
- break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
- },
- Some(chan) => chan
- };
- if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
- // Note that the behavior here should be identical to the above block - we
- // should NOT reveal the existence or non-existence of a private channel if
- // we don't allow forwards outbound over them.
- break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
- }
- if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() {
- // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
- // "refuse to forward unless the SCID alias was used", so we pretend
- // we don't have the channel here.
- break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
- }
- let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
-
- // Note that we could technically not return an error yet here and just hope
- // that the connection is reestablished or monitor updated by the time we get
- // around to doing the actual forward, but better to fail early if we can and
- // hopefully an attacker trying to path-trace payments cannot make this occur
- // on a small/per-node/per-channel scale.
- if !chan.context.is_live() { // channel_disabled
- // If the channel_update we're going to return is disabled (i.e. the
- // peer has been disabled for some time), return `channel_disabled`,
- // otherwise return `temporary_channel_failure`.
- if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
- break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
- } else {
- break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
- }
- }
- if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
- break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
- }
- if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
- break Some((err, code, chan_update_opt));
- }
- chan_update_opt
- } else {
- if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
- // We really should set `incorrect_cltv_expiry` here but as we're not
- // forwarding over a real channel we can't generate a channel_update
- // for it. Instead we just return a generic temporary_node_failure.
- break Some((
- "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
- 0x2000 | 2, None,
- ));
- }
- None
- };
-
- let cur_height = self.best_block.read().unwrap().height() + 1;
- // Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
- // but we want to be robust wrt to counterparty packet sanitization (see
- // HTLC_FAIL_BACK_BUFFER rationale).
- if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
- break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
- }
- if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
- break Some(("CLTV expiry is too far in the future", 21, None));
- }
- // If the HTLC expires ~now, don't bother trying to forward it to our
- // counterparty. They should fail it anyway, but we don't want to bother with
- // the round-trips or risk them deciding they definitely want the HTLC and
- // force-closing to ensure they get it if we're offline.
- // We previously had a much more aggressive check here which tried to ensure
- // our counterparty receives an HTLC which has *our* risk threshold met on it,
- // but there is no need to do that, and since we're a bit conservative with our
- // risk threshold it just results in failing to forward payments.
- if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
- break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
- }
-
- break None;
- }
- {
- let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
- if let Some(chan_update) = chan_update {
- if code == 0x1000 | 11 || code == 0x1000 | 12 {
- msg.amount_msat.write(&mut res).expect("Writes cannot fail");
- }
- else if code == 0x1000 | 13 {
- msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
- }
- else if code == 0x1000 | 20 {
- // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
- 0u16.write(&mut res).expect("Writes cannot fail");
- }
- (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
- msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
- chan_update.write(&mut res).expect("Writes cannot fail");
- } else if code & 0x1000 == 0x1000 {
- // If we're trying to return an error that requires a `channel_update` but
- // we're forwarding to a phantom or intercept "channel" (i.e. cannot
- // generate an update), just use the generic "temporary_node_failure"
- // instead.
- code = 0x2000 | 2;
- }
- return_err!(err, code, &res.0[..]);
- }
- }
}
-
- pending_forward_info
}
/// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
//but we should prevent it anyway.
- let pending_forward_info = self.decode_update_add_htlc_onion(msg);
+ let decoded_hop_res = self.decode_update_add_htlc_onion(msg);
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
.ok_or_else(|| {
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
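+ // The onion was decoded (and the outbound checks run) before we took the per-peer lock
+ // above; now finish constructing the pending HTLC status, or fail the HTLC back.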
+ let pending_forward_info = match decoded_hop_res {
+ Ok((next_hop, shared_secret)) =>
+ self.construct_pending_htlc_status(msg, shared_secret, next_hop),
+ Err(e) => PendingHTLCStatus::Fail(e)
+ };
let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
// If the update_add is completely bogus, the call will Err and we will close,
// but if we've sent a shutdown and they haven't acknowledged it yet, we just