+ let pending_forward_info = if next_hop_data.hmac == [0; 32] {
+ // OUR PAYMENT!
+ // final_expiry_too_soon
+ if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS) as u64 {
+ return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
+ }
+ // final_incorrect_htlc_amount
+ if next_hop_data.data.amt_to_forward > msg.amount_msat {
+ return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
+ }
+ // final_incorrect_cltv_expiry
+ if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
+ return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
+ }
+
+ // Note that we could obviously respond immediately with an update_fulfill_htlc
+ // message, however that would leak that we are the recipient of this payment, so
+ // instead we stay symmetric with the forwarding case, only responding (after a
+ // delay) once they've sent us a commitment_signed!
+
+ PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+ onion_packet: None,
+ payment_hash: msg.payment_hash.clone(),
+ short_channel_id: 0,
+ incoming_shared_secret: shared_secret,
+ amt_to_forward: next_hop_data.data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+ })
+ } else {
+ let mut new_packet_data = [0; 20*65];
+ chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
+ chacha.process(&ChannelManager::ZERO[..], &mut new_packet_data[19*65..]);
+
+ let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+
+ let blinding_factor = {
+ let mut sha = Sha256::engine();
+ sha.input(&new_pubkey.serialize()[..]);
+ sha.input(&shared_secret);
+ Sha256::from_engine(sha).into_inner()
+ };
+
+ let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
+ Err(e)
+ } else { Ok(new_pubkey) };
+
+ let outgoing_packet = msgs::OnionPacket {
+ version: 0,
+ public_key,
+ hop_data: new_packet_data,
+ hmac: next_hop_data.hmac.clone(),
+ };
+
+ PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+ onion_packet: Some(outgoing_packet),
+ payment_hash: msg.payment_hash.clone(),
+ short_channel_id: next_hop_data.data.short_channel_id,
+ incoming_shared_secret: shared_secret,
+ amt_to_forward: next_hop_data.data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+ })
+ };
+
+ channel_state = Some(self.channel_state.lock().unwrap());
+ if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+ if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here
+ let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
+ let forwarding_id = match id_option {
+ None => { // unknown_next_peer
+ return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
+ },
+ Some(id) => id.clone(),
+ };
+ if let Some((err, code, chan_update)) = loop {
+ let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+
+ // Note that we could technically not return an error yet here and just hope
+ // that the connection is reestablished or monitor updated by the time we get
+ // around to doing the actual forward, but better to fail early if we can and
+ // hopefully an attacker trying to path-trace payments cannot make this occur
+ // on a small/per-node/per-channel scale.
+ if !chan.is_live() { // channel_disabled
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
+ break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+ }
+ let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
+ if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
+ break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+ break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
+ }
+ let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ // We want to have at least HTLC_FAIL_TIMEOUT_BLOCKS blocks to fail prior to going on chain, plus CLTV_CLAIM_BUFFER blocks before expiration
+ if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + HTLC_FAIL_TIMEOUT_BLOCKS as u32 { // expiry_too_soon
+ break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+ break Some(("CLTV expiry is too far in the future", 21, None));
+ }
+ break None;
+ }
+ {
+ let mut res = Vec::with_capacity(8 + 128);
+ if let Some(chan_update) = chan_update {
+ if code == 0x1000 | 11 || code == 0x1000 | 12 {
+ res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
+ }
+ else if code == 0x1000 | 13 {
+ res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
+ }
+ else if code == 0x1000 | 20 {
+ res.extend_from_slice(&byte_utils::be16_to_array(chan_update.contents.flags));
+ }
+ res.extend_from_slice(&chan_update.encode_with_len()[..]);
+ }
+ return_err!(err, code, &res[..]);
+ }
+ }
+ }
+
+ (pending_forward_info, channel_state.unwrap())
+ }
+
+ /// Builds a signed channel_update message describing our side of the given channel,
+ /// suitable for gossip broadcast or for embedding in onion error replies.
+ ///
+ /// only fails if the channel does not yet have an assigned short_id
+ /// May be called with channel_state already locked!
+ fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, HandleError> {
+ // A short_channel_id only exists once the funding transaction is sufficiently confirmed.
+ let short_channel_id = match chan.get_short_channel_id() {
+ None => return Err(HandleError{err: "Channel not yet established", action: None}),
+ Some(id) => id,
+ };
+
+ // Per BOLT 7, the channel's "node 1" is the peer with the lexicographically lesser
+ // serialized node_id; the direction bit in `flags` below encodes which side we are.
+ let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_their_node_id().serialize()[..];
+
+ let unsigned = msgs::UnsignedChannelUpdate {
+ chain_hash: self.genesis_hash,
+ short_channel_id: short_channel_id,
+ // We use the channel's update count as a monotonically-increasing "timestamp"
+ // rather than wall-clock time, so successive updates always strictly increase.
+ timestamp: chan.get_channel_update_count(),
+ // Bit 0: direction (set when we are node 2); bit 1: channel disabled (set when
+ // the channel is not currently live/usable).
+ flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
+ cltv_expiry_delta: CLTV_EXPIRY_DELTA,
+ htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
+ fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
+ fee_proportional_millionths: chan.get_fee_proportional_millionths(),
+ excess_data: Vec::new(),
+ };
+
+ // Sign the double-SHA256 of the serialized unsigned update with our node key.
+ let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
+ let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
+
+ Ok(msgs::ChannelUpdate {
+ signature: sig,
+ contents: unsigned
+ })
+ }
+
+ /// Sends a payment along a given route.
+ ///
+ /// Value parameters are provided via the last hop in route, see documentation for RouteHop
+ /// fields for more info.
+ ///
+ /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
+ /// payment), we don't do anything to stop you! We always try to ensure that if the provided
+ /// next hop knows the preimage to payment_hash they can claim an additional amount as
+ /// specified in the last hop in the route! Thus, you should probably do your own
+ /// payment_preimage tracking (which you should already be doing as they represent "proof of
+ /// payment") and prevent double-sends yourself.
+ ///
+ /// May generate a SendHTLCs message event on success, which should be relayed.
+ ///
+ /// Raises APIError::RouteError when invalid route or forward parameter
+ /// (cltv_delta, fee, node public key) is specified.
+ /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
+ /// (including due to previous monitor update failure or new permanent monitor update failure).
+ /// Raises APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+ /// relevant updates.
+ ///
+ /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
+ /// and you may wish to retry via a different route immediately.
+ /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
+ /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
+ /// the payment via a different route unless you intend to pay twice!
+ pub fn send_payment(&self, route: Route, payment_hash: PaymentHash) -> Result<(), APIError> {
+ // The onion format supports at most 20 hops; an empty route is meaningless.
+ if route.hops.len() < 1 || route.hops.len() > 20 {
+ return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
+ }
+ let our_node_id = self.get_our_node_id();
+ // Reject routes that pass through us at any intermediate hop; only a rebalance loop
+ // ending at us is permitted.
+ for (idx, hop) in route.hops.iter().enumerate() {
+ if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
+ return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
+ }
+ }
+
+ // Fresh ephemeral key for this payment's onion; it also identifies the HTLC source.
+ let session_priv = self.keys_manager.get_session_key();
+
+ let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+
+ // Construct the full onion packet before taking any locks.
+ let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
+ APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
+ let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?;
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+
+ // Hold the consistency lock (read) for the duration of the send, per the usual
+ // ChannelManager locking order.
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ // `loop` is used purely as a break-with-error construct: `break`ing yields an error to
+ // handle below, while the success path `return`s out of the loop directly.
+ let err: Result<(), _> = loop {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+
+ let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
+ None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
+ Some(id) => id.clone(),
+ };
+
+ let channel_state = channel_lock.borrow_parts();
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+ match {
+ // Sanity-check the first hop against the channel we resolved.
+ if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey {
+ return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
+ }
+ if !chan.get().is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
+ }
+ break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ route: route.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ }, onion_packet), channel_state, chan)
+ } {
+ Some((update_add, commitment_signed, chan_monitor)) => {
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
+ // Note that MonitorUpdateFailed here indicates (per function docs)
+ // that we will resend the commitment update once we unfreeze monitor
+ // updating, so we have to take special care that we don't return
+ // something else in case we will resend later!
+ return Err(APIError::MonitorUpdateFailed);
+ }
+
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: route.hops.first().unwrap().pubkey,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: vec![update_add],
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ },
+ });
+ },
+ // None: nothing to send right now (presumably held back by the channel,
+ // e.g. queued in its holding cell — confirm against send_htlc_and_commit).
+ None => {},
+ }
+ } else { unreachable!(); }
+ return Ok(());
+ };
+
+ // We only get here via a `break` above, i.e. with an error to surface.
+ match handle_error!(self, err) {
+ Ok(_) => unreachable!(),
+ Err(e) => {
+ if let Some(msgs::ErrorAction::IgnoreError) = e.action {
+ } else {
+ log_error!(self, "Got bad keys: {}!", e.err);
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: route.hops.first().unwrap().pubkey,
+ action: e.action,
+ });
+ }
+ Err(APIError::ChannelUnavailable { err: e.err })
+ },
+ }
+ }
+
+ /// Call this upon creation of a funding transaction for the given channel.
+ ///
+ /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
+ /// or your counterparty can steal your funds!
+ ///
+ /// Panics if a funding transaction has already been provided for this channel.
+ ///
+ /// May panic if the funding_txo is duplicative with some other channel (note that this should
+ /// be trivially prevented by using unique funding transaction keys per-channel).
+ pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let (chan, msg, chan_monitor) = {
+ let (res, chan) = {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ match channel_state.by_id.remove(temporary_channel_id) {
+ Some(mut chan) => {
+ (chan.get_outbound_funding_created(funding_txo)
+ .map_err(|e| if let ChannelError::Close(msg) = e {
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
+ } else { unreachable!(); })
+ , chan)
+ },
+ None => return
+ }
+ };
+ match handle_error!(self, res) {
+ Ok(funding_msg) => {
+ (chan, funding_msg.0, funding_msg.1)
+ },
+ Err(e) => {
+ log_error!(self, "Got bad signatures: {}!", e.err);
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_their_node_id(),
+ action: e.action,
+ });
+ return;
+ },
+ }
+ };
+ // Because we have exclusive ownership of the channel here we can release the channel_state
+ // lock before add_update_monitor
+ if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ unimplemented!();