X-Git-Url: http://git.bitcoin.ninja/index.cgi?p=rust-lightning;a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=6ebe802b50c8e4749c84772bd2c73d12df44dbdd;hp=482c16356314df373e8ea7ee5645f5febbf175d9;hb=1bb9e64ebc6afaa21afcef4f71bb83054d9b2023;hpb=f06f9d11365360dc2add96acd7916673ea9ce383 diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 482c1635..6ebe802b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -64,7 +64,7 @@ use prelude::*; use core::{cmp, mem}; use core::cell::RefCell; use std::io::{Cursor, Read}; -use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard}; +use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard}; use core::sync::atomic::{AtomicUsize, Ordering}; use core::time::Duration; #[cfg(any(test, feature = "allow_wallclock_use"))] @@ -99,6 +99,10 @@ enum PendingHTLCRouting { payment_data: msgs::FinalOnionHopData, incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed }, + ReceiveKeysend { + payment_preimage: PaymentPreimage, + incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed + }, } #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug @@ -153,14 +157,20 @@ pub(crate) struct HTLCPreviousHopData { outpoint: OutPoint, } -struct ClaimableHTLC { - prev_hop: HTLCPreviousHopData, - value: u64, +enum OnionPayload { /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a /// payment_secret which prevents path-probing attacks and can associate different HTLCs which /// are part of the same payment. - payment_data: msgs::FinalOnionHopData, + Invoice(msgs::FinalOnionHopData), + /// Contains the payer-provided preimage. + Spontaneous(PaymentPreimage), +} + +struct ClaimableHTLC { + prev_hop: HTLCPreviousHopData, cltv_expiry: u32, + value: u64, + onion_payload: OnionPayload, } /// Tracks the inbound corresponding to an outbound HTLC @@ -602,6 +612,29 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRA #[allow(dead_code)] const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER; +/// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`] +/// to better separate parameters. +#[derive(Clone, Debug, PartialEq)] +pub struct ChannelCounterparty { + /// The node_id of our counterparty + pub node_id: PublicKey, + /// The Features the channel counterparty provided upon last connection. + /// Useful for routing as it is the most up-to-date copy of the counterparty's features and + /// many routing-relevant features are present in the init context. + pub features: InitFeatures, + /// The value, in satoshis, that must always be held in the channel for our counterparty. This + /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by + /// claiming at least this value on chain. + /// + /// This value is not included in [`inbound_capacity_msat`] as it can never be spent. + /// + /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat + pub unspendable_punishment_reserve: u64, + /// Information on the fees and requirements that the counterparty requires when forwarding + /// payments to us through this channel. 
+ pub forwarding_info: Option<CounterpartyForwardingInfo>, +} + /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels #[derive(Clone, Debug, PartialEq)] pub struct ChannelDetails { @@ -610,6 +643,8 @@ pub struct ChannelDetails { /// Note that this means this value is *not* persistent - it can change once during the /// lifetime of the channel. pub channel_id: [u8; 32], + /// Parameters which apply to our counterparty. See individual fields for more information. + pub counterparty: ChannelCounterparty, /// The Channel's funding transaction output, if we've negotiated the funding transaction with /// our counterparty already. /// @@ -619,12 +654,6 @@ pub struct ChannelDetails { /// The position of the funding transaction in the chain. None if the funding transaction has /// not yet been confirmed and the channel fully opened. pub short_channel_id: Option<u64>, - /// The node_id of our counterparty - pub remote_network_id: PublicKey, - /// The Features the channel counterparty provided upon last connection. - /// Useful for routing as it is the most up-to-date copy of the counterparty's features and - /// many routing-relevant features are present in the init context. - pub counterparty_features: InitFeatures, /// The value, in satoshis, of this channel as appears in the funding output pub channel_value_satoshis: u64, /// The value, in satoshis, that must always be held in the channel for us. This value ensures /// This value will be `None` for outbound channels until the counterparty accepts the channel. /// /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat - pub to_self_reserve_satoshis: Option<u64>, - /// The value, in satoshis, that must always be held in the channel for our counterparty. This - /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by - /// claiming at least this value on chain. - /// - /// This value is not included in [`inbound_capacity_msat`] as it can never be spent. - /// - /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat - pub to_remote_reserve_satoshis: u64, + pub unspendable_punishment_reserve: Option<u64>, /// The user_id passed in to create_channel, or 0 if the channel was inbound. pub user_id: u64, /// The available outbound capacity for sending HTLCs to the remote peer. This does not include @@ -685,7 +706,7 @@ pub struct ChannelDetails { /// time to claim our non-HTLC-encumbered funds. /// /// This value will be `None` for outbound channels until the counterparty accepts the channel. - pub spend_csv_on_our_commitment_funds: Option<u16>, + pub force_close_spend_delay: Option<u16>, /// True if the channel was initiated (and thus funded) by us. pub is_outbound: bool, /// True if the channel is confirmed, funding_locked messages have been exchanged, and the @@ -703,9 +724,6 @@ pub struct ChannelDetails { pub is_usable: bool, /// True if this channel is (or will be) publicly-announced. pub is_public: bool, - /// Information on the fees and requirements that the counterparty requires when forwarding - /// payments to us through this channel. - pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>, } /// If a payment fails to send, it can be in one of several states. This enum is returned as the @@ -800,7 +818,7 @@ macro_rules!
convert_chan_err { $short_to_id.remove(&short_id); } let shutdown_res = $channel.force_shutdown(true); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok())) + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) }, ChannelError::CloseDelayBroadcast(msg) => { log_error!($self.logger, "Channel {} needs to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg); @@ -808,7 +826,7 @@ macro_rules! convert_chan_err { $short_to_id.remove(&short_id); } let shutdown_res = $channel.force_shutdown(false); - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok())) + (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok())) } } } @@ -864,7 +882,8 @@ macro_rules! handle_monitor_err { // splitting hairs we'd prefer to claim payments that were to us, but we haven't // given up the preimage yet, so might as well just wait until the payment is // retried, avoiding the on-chain fees. - let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok())); + let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, + $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() )); (res, true) }, ChannelMonitorUpdateErr::TemporaryFailure => { @@ -1128,6 +1147,10 @@ impl ChannelMana /// /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000. + /// + /// Note that we do not check if you are currently connected to the given peer. If no + /// connection is available, the outbound `open_channel` message may fail to send, resulting in + /// the channel eventually being silently forgotten. pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, override_config: Option<UserConfig>) -> Result<(), APIError> { if channel_value_satoshis < 1000 { return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis.
It was {}", channel_value_satoshis) }); @@ -1170,30 +1193,32 @@ impl ChannelMana channel.get_holder_counterparty_selected_channel_reserve_satoshis(); res.push(ChannelDetails { channel_id: (*channel_id).clone(), + counterparty: ChannelCounterparty { + node_id: channel.get_counterparty_node_id(), + features: InitFeatures::empty(), + unspendable_punishment_reserve: to_remote_reserve_satoshis, + forwarding_info: channel.counterparty_forwarding_info(), + }, funding_txo: channel.get_funding_txo(), short_channel_id: channel.get_short_channel_id(), - remote_network_id: channel.get_counterparty_node_id(), - counterparty_features: InitFeatures::empty(), channel_value_satoshis: channel.get_value_satoshis(), - to_self_reserve_satoshis, - to_remote_reserve_satoshis, + unspendable_punishment_reserve: to_self_reserve_satoshis, inbound_capacity_msat, outbound_capacity_msat, user_id: channel.get_user_id(), confirmations_required: channel.minimum_depth(), - spend_csv_on_our_commitment_funds: channel.get_counterparty_selected_contest_delay(), + force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), is_outbound: channel.is_outbound(), is_funding_locked: channel.is_usable(), is_usable: channel.is_live(), is_public: channel.should_announce(), - counterparty_forwarding_info: channel.counterparty_forwarding_info(), }); } } let per_peer_state = self.per_peer_state.read().unwrap(); for chan in res.iter_mut() { - if let Some(peer_state) = per_peer_state.get(&chan.remote_network_id) { - chan.counterparty_features = peer_state.lock().unwrap().latest_features.clone(); + if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) { + chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone(); } } res @@ -1250,9 +1275,7 @@ impl ChannelMana self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } let chan_update = if let Some(chan) = chan_option { - if let Ok(update) = self.get_channel_update(&chan) { - Some(update) - } else { None } + self.get_channel_update_for_broadcast(&chan).ok() } else { None }; if let Some(update) = chan_update { @@ -1301,7 +1324,7 @@ impl ChannelMana }; log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..])); self.finish_force_close_channel(chan.force_shutdown(true)); - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -1424,121 +1447,141 @@ impl ChannelMana }; let pending_forward_info = if next_hop_hmac == [0; 32] { - #[cfg(test)] - { - // In tests, make sure that the initial onion pcket data is, at least, non-0. - // We could do some fancy randomness test here, but, ehh, whatever. - // This checks for the issue where you can calculate the path length given the - // onion data as all the path entries that the originator sent will be here - // as-is (and were originally 0s). - // Of course reverse path calculation is still pretty easy given naive routing - // algorithms, but this fixes the most-obvious case. - let mut next_bytes = [0; 32]; - chacha_stream.read_exact(&mut next_bytes).unwrap(); - assert_ne!(next_bytes[..], [0; 32][..]); - chacha_stream.read_exact(&mut next_bytes).unwrap(); - assert_ne!(next_bytes[..], [0; 32][..]); - } - - // OUR PAYMENT! 
- // final_expiry_too_soon - // We have to have some headroom to broadcast on chain if we have the preimage, so make sure we have at least - // HTLC_FAIL_BACK_BUFFER blocks to go. - // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward - // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rational). - if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { - return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); - } - // final_incorrect_htlc_amount - if next_hop_data.amt_to_forward > msg.amount_msat { - return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); - } - // final_incorrect_cltv_expiry - if next_hop_data.outgoing_cltv_value != msg.cltv_expiry { - return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); - } + #[cfg(test)] + { + // In tests, make sure that the initial onion packet data is, at least, non-0. + // We could do some fancy randomness test here, but, ehh, whatever. + // This checks for the issue where you can calculate the path length given the + // onion data as all the path entries that the originator sent will be here + // as-is (and were originally 0s). + // Of course reverse path calculation is still pretty easy given naive routing + // algorithms, but this fixes the most-obvious case. + let mut next_bytes = [0; 32]; + chacha_stream.read_exact(&mut next_bytes).unwrap(); + assert_ne!(next_bytes[..], [0; 32][..]); + chacha_stream.read_exact(&mut next_bytes).unwrap(); + assert_ne!(next_bytes[..], [0; 32][..]); + } - let payment_data = match next_hop_data.format { - msgs::OnionHopDataFormat::Legacy { .. } => None, - msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]), - msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data, - }; + // OUR PAYMENT! + // final_expiry_too_soon + // We have to have some headroom to broadcast on chain if we have the preimage, so make sure + // we have at least HTLC_FAIL_BACK_BUFFER blocks to go. + // Also, ensure that, in the case of an unknown preimage for the received payment hash, our + // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a + // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). + if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { + return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]); + } + // final_incorrect_htlc_amount + if next_hop_data.amt_to_forward > msg.amount_msat { + return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat)); + } + // final_incorrect_cltv_expiry + if next_hop_data.outgoing_cltv_value != msg.cltv_expiry { + return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); + } - if payment_data.is_none() { - return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]); - } + let routing = match next_hop_data.format { + msgs::OnionHopDataFormat::Legacy { .. } => return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]), + msgs::OnionHopDataFormat::NonFinalNode { ..
} => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]), + msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => { + if payment_data.is_some() && keysend_preimage.is_some() { + return_err!("We don't support MPP keysend payments", 0x4000|22, &[0;0]); + } else if let Some(data) = payment_data { + PendingHTLCRouting::Receive { + payment_data: data, + incoming_cltv_expiry: msg.cltv_expiry, + } + } else if let Some(payment_preimage) = keysend_preimage { + // We need to check that the sender knows the keysend preimage before processing this + // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X + // could discover the final destination of X, by probing the adjacent nodes on the route + // with a keysend payment of identical payment hash to X and observing the processing + // time discrepancies due to a hash collision with X. + let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + if hashed_preimage != msg.payment_hash { + return_err!("Payment preimage didn't match payment hash", 0x4000|22, &[0;0]); + } - // Note that we could obviously respond immediately with an update_fulfill_htlc - // message, however that would leak that we are the recipient of this payment, so - // instead we stay symmetric with the forwarding case, only responding (after a - // delay) once they've send us a commitment_signed! + PendingHTLCRouting::ReceiveKeysend { + payment_preimage, + incoming_cltv_expiry: msg.cltv_expiry, + } + } else { + return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]); + } + }, + }; - PendingHTLCStatus::Forward(PendingHTLCInfo { - routing: PendingHTLCRouting::Receive { - payment_data: payment_data.unwrap(), - incoming_cltv_expiry: msg.cltv_expiry, - }, - payment_hash: msg.payment_hash.clone(), - incoming_shared_secret: shared_secret, - amt_to_forward: next_hop_data.amt_to_forward, - outgoing_cltv_value: next_hop_data.outgoing_cltv_value, - }) - } else { - let mut new_packet_data = [0; 20*65]; - let read_pos = chacha_stream.read(&mut new_packet_data).unwrap(); - #[cfg(debug_assertions)] - { - // Check two things: - // a) that the behavior of our stream here will return Ok(0) even if the TLV - // read above emptied out our buffer and the unwrap() wont needlessly panic - // b) that we didn't somehow magically end up with extra data. - let mut t = [0; 1]; - debug_assert!(chacha_stream.read(&mut t).unwrap() == 0); - } - // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we - // fill the onion hop data we'll forward to our next-hop peer. - chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]); + // Note that we could obviously respond immediately with an update_fulfill_htlc + // message, however that would leak that we are the recipient of this payment, so + // instead we stay symmetric with the forwarding case, only responding (after a + // delay) once they've send us a commitment_signed! 
+ + PendingHTLCStatus::Forward(PendingHTLCInfo { + routing, + payment_hash: msg.payment_hash.clone(), + incoming_shared_secret: shared_secret, + amt_to_forward: next_hop_data.amt_to_forward, + outgoing_cltv_value: next_hop_data.outgoing_cltv_value, + }) + } else { + let mut new_packet_data = [0; 20*65]; + let read_pos = chacha_stream.read(&mut new_packet_data).unwrap(); + #[cfg(debug_assertions)] + { + // Check two things: + // a) that the behavior of our stream here will return Ok(0) even if the TLV + // read above emptied out our buffer and the unwrap() won't needlessly panic + // b) that we didn't somehow magically end up with extra data. + let mut t = [0; 1]; + debug_assert!(chacha_stream.read(&mut t).unwrap() == 0); + } + // Once we've emptied the set of bytes our peer gave us, encrypt 0 bytes until we + // fill the onion hop data we'll forward to our next-hop peer. + chacha_stream.chacha.process_in_place(&mut new_packet_data[read_pos..]); - let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap(); + let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap(); - let blinding_factor = { - let mut sha = Sha256::engine(); - sha.input(&new_pubkey.serialize()[..]); - sha.input(&shared_secret); - Sha256::from_engine(sha).into_inner() - }; - - let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) { - Err(e) - } else { Ok(new_pubkey) }; + let blinding_factor = { + let mut sha = Sha256::engine(); + sha.input(&new_pubkey.serialize()[..]); + sha.input(&shared_secret); + Sha256::from_engine(sha).into_inner() + }; - let outgoing_packet = msgs::OnionPacket { - version: 0, - public_key, - hop_data: new_packet_data, - hmac: next_hop_hmac.clone(), - }; + let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) { + Err(e) + } else { Ok(new_pubkey) }; - let short_channel_id = match next_hop_data.format { - msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id, - msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, - msgs::OnionHopDataFormat::FinalNode { .. } => { - return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); - }, - }; + let outgoing_packet = msgs::OnionPacket { + version: 0, + public_key, + hop_data: new_packet_data, + hmac: next_hop_hmac.clone(), + }; - PendingHTLCStatus::Forward(PendingHTLCInfo { - routing: PendingHTLCRouting::Forward { - onion_packet: outgoing_packet, - short_channel_id, - }, - payment_hash: msg.payment_hash.clone(), - incoming_shared_secret: shared_secret, - amt_to_forward: next_hop_data.amt_to_forward, - outgoing_cltv_value: next_hop_data.outgoing_cltv_value, - }) + let short_channel_id = match next_hop_data.format { + msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id, + msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, + msgs::OnionHopDataFormat::FinalNode { ..
} => { + return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); + }, }; + PendingHTLCStatus::Forward(PendingHTLCInfo { + routing: PendingHTLCRouting::Forward { + onion_packet: outgoing_packet, + short_channel_id, + }, + payment_hash: msg.payment_hash.clone(), + incoming_shared_secret: shared_secret, + amt_to_forward: next_hop_data.amt_to_forward, + outgoing_cltv_value: next_hop_data.outgoing_cltv_value, + }) + }; + channel_state = Some(self.channel_state.lock().unwrap()); if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info { // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel @@ -1546,46 +1589,56 @@ impl ChannelMana // short_channel_id is non-0 in any ::Forward. if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing { let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); - let forwarding_id = match id_option { - None => { // unknown_next_peer - return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]); - }, - Some(id) => id.clone(), - }; if let Some((err, code, chan_update)) = loop { + let forwarding_id = match id_option { + None => { // unknown_next_peer + break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); + }, + Some(id) => id.clone(), + }; + let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); + if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { + // Note that the behavior here should be identical to the above block - we + // should NOT reveal the existence or non-existence of a private channel if + // we don't allow forwards outbound over them. + break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None)); + } + // Note that we could technically not return an error yet here and just hope // that the connection is reestablished or monitor updated by the time we get // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. 
if !chan.is_live() { // channel_disabled - break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap()))); + break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum - break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap()))); + break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } - let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_holder_fee_base_msat(&self.fee_estimator) as u64) }); + let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64) + .and_then(|prop_fee| { (prop_fee / 1000000) + .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) }); if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient - break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap()))); + break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry - break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap()))); + break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } let cur_height = self.best_block.read().unwrap().height() + 1; // Theoretically, channel counterparty shouldn't send us an HTLC expiring now, but we want to be robust wrt counterparty // packet sanitization (see HTLC_FAIL_BACK_BUFFER rationale) if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon - break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap()))); + break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far break Some(("CLTV expiry is too far in the future", 21, None)); } - // In theory, we would be safe against unitentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS. - // But, to be safe against policy reception, we use a longuer delay. + // In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS. + // But, to be safe against policy reception, we use a longer delay.
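// A compact sketch of the fee and CLTV headroom checks this loop enforces, using the same
// checked arithmetic as the code above. Helper names and free-standing parameters are
// illustrative assumptions, not APIs of this file:
//
//   fn expected_forward_fee_msat(amt_to_forward: u64, base_msat: u32, prop_millionths: u32) -> Option<u64> {
//       // fee = base + amt * proportional / 1_000_000, with overflow checks so a
//       // maliciously large amount cannot wrap the computation.
//       amt_to_forward.checked_mul(prop_millionths as u64)
//           .and_then(|prop| (prop / 1_000_000).checked_add(base_msat as u64))
//   }
//
//   fn outgoing_cltv_has_headroom(outgoing_cltv: u32, cur_height: u32) -> bool {
//       // Mirrors the check below: keep at least HTLC_FAIL_BACK_BUFFER blocks to fail
//       // the HTLC backwards before our on-chain logic would force a channel closure.
//       (outgoing_cltv as u64) > (cur_height + HTLC_FAIL_BACK_BUFFER) as u64
//   }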
if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 { - break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap()))); + break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap()))); } break None; @@ -1613,9 +1666,29 @@ impl ChannelMana (pending_forward_info, channel_state.unwrap()) } - /// only fails if the channel does not yet have an assigned short_id + /// Gets the current channel_update for the given channel. This first checks if the channel is + /// public, and thus should be called whenever the result is going to be passed out in a + /// [`MessageSendEvent::BroadcastChannelUpdate`] event. + /// /// May be called with channel_state already locked! - fn get_channel_update(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> { + fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> { + if !chan.should_announce() { + return Err(LightningError { + err: "Cannot broadcast a channel_update for a private channel".to_owned(), + action: msgs::ErrorAction::IgnoreError + }); + } + log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id())); + self.get_channel_update_for_unicast(chan) + } + + /// Gets the current channel_update for the given channel. This does not check if the channel + /// is public (only returning an Err if the channel does not yet have an assigned short_id), + /// and thus MUST NOT be called unless the recipient of the resulting message has already + /// provided evidence that they know about the existence of the channel. + /// May be called with channel_state already locked! + fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> { + log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id())); let short_channel_id = match chan.get_short_channel_id() { None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}), Some(id) => id, @@ -1631,7 +1704,7 @@ impl ChannelMana cltv_expiry_delta: chan.get_cltv_expiry_delta(), htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(), htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()), - fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator), + fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(), fee_proportional_millionths: chan.get_fee_proportional_millionths(), excess_data: Vec::new(), }; @@ -1646,7 +1719,7 @@ impl ChannelMana } // Only public for testing, this should otherwise never be called directly - pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> { + pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> { log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let prng_seed = self.keys_manager.get_secure_random_bytes(); let session_priv_bytes = self.keys_manager.get_secure_random_bytes(); @@ -1654,7 +1727,7 @@ impl ChannelMana let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?; - let (onion_payloads, htlc_msat, htlc_cltv) =
onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?; + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?; if onion_utils::route_size_insane(&onion_payloads) { return Err(APIError::RouteError{err: "Route size too large considering onion data"}); } @@ -1763,6 +1836,10 @@ impl ChannelMana /// bit set (either as required or as available). If multiple paths are present in the Route, /// we assume the invoice had the basic_mpp feature set. pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<(), PaymentSendFailure> { + self.send_payment_internal(route, payment_hash, payment_secret, None) + } + + fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>) -> Result<(), PaymentSendFailure> { if route.paths.len() < 1 { return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); } @@ -1796,7 +1873,7 @@ impl ChannelMana let cur_height = self.best_block.read().unwrap().height() + 1; let mut results = Vec::new(); for path in route.paths.iter() { - results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height)); + results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height, &keysend_preimage)); } let mut has_ok = false; let mut has_err = false; @@ -1820,6 +1897,28 @@ impl ChannelMana } } + /// Send a spontaneous payment, which is a payment that does not require the recipient to have + /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify + /// the preimage, it must be a cryptographically secure random value that no intermediate node + /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will + /// never reach the recipient. + /// + /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See + /// [`send_payment`] for more information about the risks of duplicate preimage usage. + /// + /// [`send_payment`]: Self::send_payment + pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<PaymentHash, PaymentSendFailure> { + let preimage = match payment_preimage { + Some(p) => p, + None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()), + }; + let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner()); + match self.send_payment_internal(route, payment_hash, &None, Some(preimage)) { + Ok(()) => Ok(payment_hash), + Err(e) => Err(e) + } + } +
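// Caller-side sketch of the keysend API introduced above. `channel_manager`, `keys_manager`
// and `route` are assumed bindings in the caller's scope; this illustrates the calling
// convention rather than code from this file:
//
//   // Preferred: pass None and let ChannelManager draw a fresh, secure random preimage
//   // from its KeysInterface.
//   let payment_hash = channel_manager.send_spontaneous_payment(&route, None)?;
//
//   // Alternatively, supply a preimage explicitly; it must come from a secure RNG and
//   // must never be reused across payments.
//   let preimage = PaymentPreimage(keys_manager.get_secure_random_bytes());
//   let payment_hash = channel_manager.send_spontaneous_payment(&route, Some(preimage))?;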
/// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>> @@ -2008,7 +2107,7 @@ impl ChannelMana if let Some(msg) = chan.get_signed_channel_announcement(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone()) { channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg, - update_msg: match self.get_channel_update(chan) { + update_msg: match self.get_channel_update_for_broadcast(chan) { Ok(msg) => msg, Err(_) => continue, }, @@ -2100,7 +2199,7 @@ impl ChannelMana } else { panic!("Stated return value requirements in send_htlc() were not met"); } - let chan_update = self.get_channel_update(chan.get()).unwrap(); + let chan_update = self.get_channel_update_for_unicast(chan.get()).unwrap(); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() } )); @@ -2172,7 +2271,7 @@ impl ChannelMana if let Some(short_id) = channel.get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } - Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok())) + Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok())) }, ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } }; @@ -2205,9 +2304,17 @@ impl ChannelMana for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { - routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry }, - incoming_shared_secret, payment_hash, amt_to_forward, .. }, + routing, incoming_shared_secret, payment_hash, amt_to_forward, .. }, prev_funding_outpoint } => { + let (cltv_expiry, onion_payload) = match routing { + PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry } => + (incoming_cltv_expiry, OnionPayload::Invoice(payment_data)), + PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } => + (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage)), + _ => { + panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); + } + }; let claimable_htlc = ClaimableHTLC { prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, @@ -2216,8 +2323,8 @@ impl ChannelMana incoming_packet_shared_secret: incoming_shared_secret, }, value: amt_to_forward, - payment_data: payment_data.clone(), - cltv_expiry: incoming_cltv_expiry, + cltv_expiry, + onion_payload, }; macro_rules!
fail_htlc { @@ -2246,10 +2353,38 @@ impl ChannelMana let mut payment_secrets = self.pending_inbound_payments.lock().unwrap(); match payment_secrets.entry(payment_hash) { hash_map::Entry::Vacant(_) => { - log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + match claimable_htlc.onion_payload { + OnionPayload::Invoice(_) => { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + }, + OnionPayload::Spontaneous(preimage) => { + match channel_state.claimable_htlcs.entry(payment_hash) { + hash_map::Entry::Vacant(e) => { + e.insert(vec![claimable_htlc]); + new_events.push(events::Event::PaymentReceived { + payment_hash, + amt: amt_to_forward, + purpose: events::PaymentPurpose::SpontaneousPayment(preimage), + }); + }, + hash_map::Entry::Occupied(_) => { + log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + } + } + } + } }, hash_map::Entry::Occupied(inbound_payment) => { + let payment_data = + if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload { + data.clone() + } else { + log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + continue + }; if inbound_payment.get().payment_secret != payment_data.payment_secret { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0)); fail_htlc!(claimable_htlc); @@ -2261,15 +2396,27 @@ impl ChannelMana let mut total_value = 0; let htlcs = channel_state.claimable_htlcs.entry(payment_hash) .or_insert(Vec::new()); + if htlcs.len() == 1 { + if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { + log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); + fail_htlc!(claimable_htlc); + continue + } + } htlcs.push(claimable_htlc); for htlc in htlcs.iter() { total_value += htlc.value; - if htlc.payment_data.total_msat != payment_data.total_msat { - log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", - log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat); - total_value = msgs::MAX_VALUE_MSAT; + match &htlc.onion_payload { + OnionPayload::Invoice(htlc_payment_data) => { + if htlc_payment_data.total_msat != payment_data.total_msat { + log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})", + log_bytes!(payment_hash.0), payment_data.total_msat, htlc_payment_data.total_msat); + total_value = msgs::MAX_VALUE_MSAT; + } + if total_value >= msgs::MAX_VALUE_MSAT { break; } + }, + _ => unreachable!(), } - if total_value >= msgs::MAX_VALUE_MSAT { break; } } if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat { log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)", @@ -2280,10 +2427,12 @@ impl ChannelMana } else if total_value == payment_data.total_msat { 
new_events.push(events::Event::PaymentReceived { payment_hash, - payment_preimage: inbound_payment.get().payment_preimage, - payment_secret: payment_data.payment_secret, + purpose: events::PaymentPurpose::InvoicePayment { + payment_preimage: inbound_payment.get().payment_preimage, + payment_secret: payment_data.payment_secret, + user_payment_id: inbound_payment.get().user_payment_id, + }, amt: total_value, - user_payment_id: inbound_payment.get().user_payment_id, }); // Only ever generate at most one PaymentReceived // per registered payment_hash, even if it isn't @@ -2298,9 +2447,6 @@ impl ChannelMana }, }; }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); - }, HTLCForwardInfo::FailHTLC { .. } => { panic!("Got pending fail of our own HTLC"); } @@ -2375,7 +2521,7 @@ impl ChannelMana ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -2384,7 +2530,7 @@ impl ChannelMana chan.set_channel_update_status(ChannelUpdateStatus::Disabled); }, ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -2434,7 +2580,7 @@ impl ChannelMana let (failure_code, onion_failure_data) = match self.channel_state.lock().unwrap().by_id.entry(channel_id) { hash_map::Entry::Occupied(chan_entry) => { - if let Ok(upd) = self.get_channel_update(&chan_entry.get()) { + if let Ok(upd) = self.get_channel_update_for_unicast(&chan_entry.get()) { (0x1000|7, upd.encode_with_len()) } else { (0x4000|10, Vec::new()) @@ -2801,7 +2947,8 @@ impl ChannelMana pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let (mut pending_failures, chan_restoration_res) = { + let chan_restoration_res; + let mut pending_failures = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) { @@ -2813,7 +2960,21 @@ impl ChannelMana } let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger); - (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked)) + let channel_update = if funding_locked.is_some() && channel.get().is_usable() && !channel.get().should_announce() { + // We only send a channel_update in the case where we are just now sending a + // funding_locked and the channel is in a usable state. 
Further, we rely on the + // normal announcement_signatures process to send a channel_update for public + // channels, only generating a unicast channel_update if this is a private channel. + Some(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.get().get_counterparty_node_id(), + msg: self.get_channel_update_for_unicast(channel.get()).unwrap(), + }) + } else { None }; + chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked); + if let Some(upd) = channel_update { + channel_state.pending_msg_events.push(upd); + } + pending_failures }; post_handle_chan_restoration!(self, chan_restoration_res); for failure in pending_failures.drain(..) { @@ -2976,6 +3137,11 @@ impl ChannelMana node_id: counterparty_node_id.clone(), msg: announcement_sigs, }); + } else if chan.get().is_usable() { + channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: counterparty_node_id.clone(), + msg: self.get_channel_update_for_unicast(chan.get()).unwrap(), + }); } Ok(()) }, @@ -3020,7 +3186,7 @@ impl ChannelMana self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } if let Some(chan) = chan_option { - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -3066,7 +3232,7 @@ impl ChannelMana self.tx_broadcaster.broadcast_transaction(&broadcast_tx); } if let Some(chan) = chan_option { - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -3104,7 +3270,7 @@ impl ChannelMana // want to reject the new HTLC and fail it backwards instead of forwarding. match pending_forward_info { PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => { - let reason = if let Ok(upd) = self.get_channel_update(chan) { + let reason = if let Ok(upd) = self.get_channel_update_for_unicast(chan) { onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &{ let mut res = Vec::with_capacity(8 + 128); // TODO: underspecified, follow https://github.com/lightningnetwork/lightning-rfc/issues/791 @@ -3258,6 +3424,7 @@ impl ChannelMana match channel_state.forward_htlcs.entry(match forward_info.routing { PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, PendingHTLCRouting::Receive { .. } => 0, + PendingHTLCRouting::ReceiveKeysend { .. 
} => 0, }) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint, @@ -3366,7 +3533,9 @@ impl ChannelMana channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(&self.our_network_key, self.get_our_node_id(), self.genesis_hash.clone(), msg), channel_state, chan), - update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state + // Note that announcement_signatures fails if the channel cannot be announced, + // so get_channel_update_for_broadcast will never fail by the time we get here. + update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(), }); }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -3396,7 +3565,13 @@ impl ChannelMana } return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id)); } - try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); + let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..]; + let msg_from_node_one = msg.contents.flags & 1 == 0; + if were_node_one == msg_from_node_one { + return Ok(NotifyOption::SkipPersist); + } else { + try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); + } }, hash_map::Entry::Vacant(_) => unreachable!() } @@ -3404,7 +3579,8 @@ impl ChannelMana } fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { - let (htlcs_failed_forward, need_lnd_workaround, chan_restoration_res) = { + let chan_restoration_res; + let (htlcs_failed_forward, need_lnd_workaround) = { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -3419,15 +3595,27 @@ impl ChannelMana // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here. let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) = try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan); + let mut channel_update = None; if let Some(msg) = shutdown { channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown { node_id: counterparty_node_id.clone(), msg, }); + } else if chan.get().is_usable() { + // If the channel is in a usable state (ie the channel is not being shut + // down), send a unicast channel_update to our counterparty to make sure + // they have the latest channel parameters. 
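// A standalone sketch of the direction check added in internal_channel_update above: a
// channel_update applies to one direction of the channel, and bit 0 of `flags` identifies
// the sender as node_one (the lexicographically-smaller pubkey). A peer echoing our own
// update back to us must not overwrite our local fee parameters. The helper is illustrative
// only:
//
//   fn update_is_for_counterpartys_direction(our_id: &PublicKey, their_id: &PublicKey, flags: u8) -> bool {
//       let we_are_node_one = our_id.serialize()[..] < their_id.serialize()[..];
//       let from_node_one = flags & 1 == 0;
//       // Process the update only if it was generated by the counterparty.
//       we_are_node_one != from_node_one
//   }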
+ channel_update = Some(events::MessageSendEvent::SendChannelUpdate { + node_id: chan.get().get_counterparty_node_id(), + msg: self.get_channel_update_for_unicast(chan.get()).unwrap(), + }); } let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take(); - (htlcs_failed_forward, need_lnd_workaround, - handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked)) + chan_restoration_res = handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked); + if let Some(upd) = channel_update { + channel_state.pending_msg_events.push(upd); + } + (htlcs_failed_forward, need_lnd_workaround) }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) } @@ -3524,7 +3712,7 @@ impl ChannelMana short_to_id.remove(&short_id); } failed_channels.push(chan.force_shutdown(false)); - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -3692,7 +3880,7 @@ impl ChannelMana /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This /// method may return an Err if another payment with the same payment_hash is still pending. /// - /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to + /// `user_payment_id` will be provided back in [`PaymentPurpose::InvoicePayment::user_payment_id`] events to /// allow tracking of which events correspond with which calls to this and /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK, it is simply /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events @@ -3726,7 +3914,7 @@ impl ChannelMana /// /// [`create_inbound_payment`]: Self::create_inbound_payment /// [`PaymentReceived`]: events::Event::PaymentReceived - /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id + /// [`PaymentPurpose::InvoicePayment::user_payment_id`]: events::PaymentPurpose::InvoicePayment::user_payment_id pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> { self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id) } @@ -3963,7 +4151,7 @@ where let res = f(channel); if let Ok((chan_res, mut timed_out_pending_htlcs)) = res { for (source, payment_hash) in timed_out_pending_htlcs.drain(..)
{ - let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe + let chan_update = self.get_channel_update_for_unicast(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 14, // expiry_too_soon, or at least it is now data: chan_update, @@ -3980,6 +4168,12 @@ where node_id: channel.get_counterparty_node_id(), msg: announcement_sigs, }); + } else if channel.is_usable() { + log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures but with private channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); + pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { + node_id: channel.get_counterparty_node_id(), + msg: self.get_channel_update_for_unicast(channel).unwrap(), + }); } else { log_trace!(self.logger, "Sending funding_locked WITHOUT announcement_signatures for {}", log_bytes!(channel.channel_id())); } @@ -3992,7 +4186,7 @@ where // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. failed_channels.push(channel.force_shutdown(true)); - if let Ok(update) = self.get_channel_update(&channel) { + if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -4182,7 +4376,7 @@ impl short_to_id.remove(&short_id); } failed_channels.push(chan.force_shutdown(true)); - if let Ok(update) = self.get_channel_update(&chan) { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); @@ -4225,6 +4419,7 @@ impl &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, + &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true, &events::MessageSendEvent::SendChannelRangeQuery { .. 
} => false, @@ -4289,7 +4484,7 @@ impl if msg.channel_id == [0; 32] { for chan in self.list_channels() { - if chan.remote_network_id == *counterparty_node_id { + if chan.counterparty.node_id == *counterparty_node_id { // Untrusted messages from peer, we throw away the error if id points to a non-existent channel let _ = self.force_close_channel_with_peer(&chan.channel_id, Some(counterparty_node_id)); } @@ -4383,7 +4578,11 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (1, Receive) => { (0, payment_data, required), (2, incoming_cltv_expiry, required), - } + }, + (2, ReceiveKeysend) => { + (0, payment_preimage, required), + (2, incoming_cltv_expiry, required), + }, ;); impl_writeable_tlv_based!(PendingHTLCInfo, { @@ -4410,12 +4609,63 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (6, incoming_packet_shared_secret, required) }); -impl_writeable_tlv_based!(ClaimableHTLC, { - (0, prev_hop, required), - (2, value, required), - (4, payment_data, required), - (6, cltv_expiry, required), -}); +impl Writeable for ClaimableHTLC { + fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> { + let payment_data = match &self.onion_payload { + OnionPayload::Invoice(data) => Some(data.clone()), + _ => None, + }; + let keysend_preimage = match self.onion_payload { + OnionPayload::Invoice(_) => None, + OnionPayload::Spontaneous(preimage) => Some(preimage.clone()), + }; + write_tlv_fields! + (writer, + { + (0, self.prev_hop, required), (2, self.value, required), + (4, payment_data, option), (6, self.cltv_expiry, required), + (8, keysend_preimage, option), + }); + Ok(()) + } +} + +impl Readable for ClaimableHTLC { + fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> { + let mut prev_hop = ::util::ser::OptionDeserWrapper(None); + let mut value = 0; + let mut payment_data: Option<msgs::FinalOnionHopData> = None; + let mut cltv_expiry = 0; + let mut keysend_preimage: Option<PaymentPreimage> = None; + read_tlv_fields!
+ (reader, + { + (0, prev_hop, required), (2, value, required), + (4, payment_data, option), (6, cltv_expiry, required), + (8, keysend_preimage, option) + }); + let onion_payload = match keysend_preimage { + Some(p) => { + if payment_data.is_some() { + return Err(DecodeError::InvalidValue) + } + OnionPayload::Spontaneous(p) + }, + None => { + if payment_data.is_none() { + return Err(DecodeError::InvalidValue) + } + OnionPayload::Invoice(payment_data.unwrap()) + }, + }; + Ok(Self { + prev_hop: prev_hop.0.unwrap(), + value, + onion_payload, + cltv_expiry, + }) + } +} impl_writeable_tlv_based_enum!(HTLCSource, (0, OutboundRoute) => { @@ -4862,15 +5112,23 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> #[cfg(test)] mod tests { - use ln::channelmanager::PersistenceNotifier; - use std::sync::Arc; + use bitcoin::hashes::Hash; + use bitcoin::hashes::sha256::Hash as Sha256; use core::sync::atomic::{AtomicBool, Ordering}; - use std::thread; use core::time::Duration; + use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; + use ln::channelmanager::PersistenceNotifier; + use ln::features::{InitFeatures, InvoiceFeatures}; use ln::functional_test_utils::*; - use ln::features::InitFeatures; + use ln::msgs; use ln::msgs::ChannelMessageHandler; + use routing::router::{get_keysend_route, get_route}; + use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; + use util::test_utils; + use std::sync::Arc; + use std::thread; + #[cfg(feature = "std")] #[test] fn test_wait_timeout() { let persistence_notifier = Arc::new(PersistenceNotifier::new()); @@ -4958,6 +5216,290 @@ mod tests { // At this point the channel info given by peers should still be the same. assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info); assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info); + + // An earlier version of handle_channel_update didn't check the directionality of the + // update message and would always update the local fee info, even if our peer was + // (spuriously) forwarding us our own channel_update. + let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..]; + let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 }; + let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 }; + + // First deliver each peers' own message, checking that the node doesn't need to be + // persisted and that its channel info remains the same. + nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update); + nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update); + assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); + assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info); + assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info); + + // Finally, deliver the other peers' message, ensuring each node needs to be persisted and + // the channel info has updated. 
+		nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
+		nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
+		assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
+		assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
+		assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
+	}
+
+	#[test]
+	fn test_keysend_dup_hash_partial_mpp() {
+		// Test that a keysend payment with a duplicate hash to an existing partial MPP payment
+		// fails as expected.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+		let logger = test_utils::TestLogger::new();
+
+		// First, send a partial MPP payment.
+		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+		let (payment_preimage, our_payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+		// Use the utility function send_payment_along_path to send the payment with MPP data which
+		// indicates there are more HTLCs coming.
+		let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
+		nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
+
+		// Next, send a keysend payment with the same payment_hash and make sure it fails.
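+		// (The recipient already holds pending invoice-style HTLCs under this hash, and a
+		// keysend (Spontaneous) payload cannot be combined with them, so nodes[1] is expected
+		// to fail the HTLC back rather than accept it.)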
+		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let ev = events.drain(..).next().unwrap();
+		let payment_event = SendEvent::from_event(ev);
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(nodes[1], 0);
+		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		check_added_monitors!(nodes[1], 1);
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+		expect_payment_failed!(nodes[0], our_payment_hash, true);
+
+		// Send the second half of the original MPP payment.
+		nodes[0].node.send_payment_along_path(&route.paths[0], &our_payment_hash, &Some(payment_secret), 200_000, cur_height, &None).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
+
+		// Claim the full MPP payment. Note that we can't use a test utility like
+		// claim_funds_along_route because the ordering of the messages causes the second half of the
+		// payment to be put in the holding cell, which confuses the test utilities. So we exchange the
+		// lightning messages manually.
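+		// Each claim follows the usual revoke-and-ack dance (update_fulfill_htlc plus
+		// commitment_signed one way, revoke_and_ack plus commitment_signed back, then a final
+		// revoke_and_ack); the second claim's messages interleave with the first because that
+		// HTLC sat in nodes[1]'s holding cell.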
+		assert!(nodes[1].node.claim_funds(payment_preimage));
+		check_added_monitors!(nodes[1], 2);
+		let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
+		check_added_monitors!(nodes[1], 1);
+		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
+		check_added_monitors!(nodes[1], 1);
+		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
+		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
+		check_added_monitors!(nodes[0], 1);
+		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
+		let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+		check_added_monitors!(nodes[0], 1);
+		nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
+		check_added_monitors!(nodes[1], 1);
+		nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
+		check_added_monitors!(nodes[1], 1);
+		let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
+		check_added_monitors!(nodes[0], 1);
+
+		// There's an existing bug that generates a PaymentSent event for each MPP path, so handle that here.
+		let events = nodes[0].node.get_and_clear_pending_events();
+		match events[0] {
+			Event::PaymentSent { payment_preimage: ref preimage } => {
+				assert_eq!(payment_preimage, *preimage);
+			},
+			_ => panic!("Unexpected event"),
+		}
+		match events[1] {
+			Event::PaymentSent { payment_preimage: ref preimage } => {
+				assert_eq!(payment_preimage, *preimage);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+
+	#[test]
+	fn test_keysend_dup_payment_hash() {
+		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
+		// outbound regular payment fails as expected.
+		// (2): Test that a regular payment with a duplicate payment hash to an existing keysend
+		// payment fails as expected.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+		let logger = test_utils::TestLogger::new();
+
+		// To start (1), send a regular payment but don't claim it.
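+		// (Duplicate payment hashes are rejected because both payments would share one preimage:
+		// revealing it to settle either payment would let anyone along the path claim the other.)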
+		let expected_route = [&nodes[1]];
+		let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
+
+		// Next, attempt a keysend payment and make sure it fails.
+		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let ev = events.drain(..).next().unwrap();
+		let payment_event = SendEvent::from_event(ev);
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(nodes[1], 0);
+		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		check_added_monitors!(nodes[1], 1);
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+		expect_payment_failed!(nodes[0], payment_hash, true);
+
+		// Finally, claim the original payment.
+		claim_payment(&nodes[0], &expected_route, payment_preimage);
+
+		// To start (2), send a keysend payment but don't claim it.
+		let payment_preimage = PaymentPreimage([42; 32]);
+		let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &expected_route.last().unwrap().node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, &logger).unwrap();
+		let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let event = events.pop().unwrap();
+		let path = vec![&nodes[1]];
+		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
+
+		// Next, attempt a regular payment and make sure it fails.
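+		// (This mirrors part (1) with the roles reversed: the recipient now holds a pending
+		// keysend HTLC under this hash, so an invoice-style payment reusing the same hash must
+		// be failed back.)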
+		let payment_secret = PaymentSecret([43; 32]);
+		nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let ev = events.drain(..).next().unwrap();
+		let payment_event = SendEvent::from_event(ev);
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(nodes[1], 0);
+		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		expect_pending_htlcs_forwardable!(nodes[1]);
+		check_added_monitors!(nodes[1], 1);
+		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert!(updates.update_add_htlcs.is_empty());
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert_eq!(updates.update_fail_htlcs.len(), 1);
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
+		expect_payment_failed!(nodes[0], payment_hash, true);
+
+		// Finally, succeed the keysend payment.
+		claim_payment(&nodes[0], &expected_route, payment_preimage);
+	}
+
+	#[test]
+	fn test_keysend_hash_mismatch() {
+		// Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
+		// preimage doesn't match the msg's payment hash.
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		let payer_pubkey = nodes[0].node.get_our_node_id();
+		let payee_pubkey = nodes[1].node.get_our_node_id();
+		nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+		nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+		let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+		let first_hops = nodes[0].node.list_usable_channels();
+		let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+		                              Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+		                              nodes[0].logger).unwrap();
+
+		let test_preimage = PaymentPreimage([42; 32]);
+		let mismatch_payment_hash = PaymentHash([43; 32]);
+		let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+		assert_eq!(updates.update_add_htlcs.len(), 1);
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert!(updates.update_fail_htlcs.is_empty());
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+		nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
+	}
+
+	#[test]
+	fn test_keysend_msg_with_secret_err() {
+		// Test that we error as expected if we receive a keysend payment that includes a payment secret.
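+		// (A keysend payment carries its preimage in the onion rather than referencing an
+		// invoice, so there is no payment_secret to present; a secret alongside a keysend
+		// preimage is treated as an unsupported MPP-keysend combination, per the log
+		// assertion below.)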
+		let chanmon_cfgs = create_chanmon_cfgs(2);
+		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+		let payer_pubkey = nodes[0].node.get_our_node_id();
+		let payee_pubkey = nodes[1].node.get_our_node_id();
+		nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known() });
+		nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known() });
+
+		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
+		let network_graph = nodes[0].net_graph_msg_handler.network_graph.read().unwrap();
+		let first_hops = nodes[0].node.list_usable_channels();
+		let route = get_keysend_route(&payer_pubkey, &network_graph, &payee_pubkey,
+		                              Some(&first_hops.iter().collect::<Vec<_>>()), &vec![], 10000, 40,
+		                              nodes[0].logger).unwrap();
+
+		let test_preimage = PaymentPreimage([42; 32]);
+		let test_secret = PaymentSecret([43; 32]);
+		let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
+		let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage)).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+		assert_eq!(updates.update_add_htlcs.len(), 1);
+		assert!(updates.update_fulfill_htlcs.is_empty());
+		assert!(updates.update_fail_htlcs.is_empty());
+		assert!(updates.update_fail_malformed_htlcs.is_empty());
+		assert!(updates.update_fee.is_none());
+		nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+		nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
+	}
 }
 
@@ -4975,13 +5517,13 @@ pub mod bench {
 	use routing::router::get_route;
 	use util::test_utils;
 	use util::config::UserConfig;
-	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
 
 	use bitcoin::hashes::Hash;
 	use bitcoin::hashes::sha256::Hash as Sha256;
 	use bitcoin::{Block, BlockHeader, Transaction, TxOut};
 
-	use std::sync::{Arc, Mutex};
+	use sync::{Arc, Mutex};
 
 	use test::Bencher;
 
@@ -5008,7 +5550,7 @@ pub mod bench {
 		let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
 		let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
-		let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+		let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 
 		let mut config: UserConfig = Default::default();
 		config.own_channel_config.minimum_depth = 1;
@@ -5058,7 +5600,19 @@ pub mod bench {
 		Listen::block_connected(&node_b, &block, 1);
 
 		node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
-		node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+		let msg_events = node_a.get_and_clear_pending_msg_events();
+		assert_eq!(msg_events.len(), 2);
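+		// node_a now queues a SendChannelUpdate alongside its SendFundingLocked, so the bench
+		// consumes both pending events explicitly instead of using get_event_msg!.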
+		match msg_events[0] {
+			MessageSendEvent::SendFundingLocked { ref msg, .. } => {
+				node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
+				get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
+			},
+			_ => panic!(),
+		}
+		match msg_events[1] {
+			MessageSendEvent::SendChannelUpdate { .. } => {},
+			_ => panic!(),
+		}
 
 		let dummy_graph = NetworkGraph::new(genesis_hash);
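+		// (A fresh, empty NetworkGraph suffices for the bench: routes are found via the
+		// first_hops list of usable channels rather than via gossiped graph data.)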