X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=8086fe3a9601b01d74ee36829fd1c13af659ec40;hb=795aff8da5b79dee1bea8234479cdf0ff5c98118;hp=5f7e903fd201c6da81c0eadf20dd61d6f98e2dd6;hpb=32ca8ec13e0928cbb4f7067a3fb6d41f39691d1c;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 5f7e903f..8086fe3a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -28,9 +28,9 @@ use secp256k1; use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator}; use chain::transaction::OutPoint; use ln::channel::{Channel, ChannelError}; -use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; -use ln::router::Route; -use ln::features::InitFeatures; +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; +use ln::features::{InitFeatures, NodeFeatures}; +use ln::router::{Route, RouteHop}; use ln::msgs; use ln::onion_utils; use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError}; @@ -68,12 +68,23 @@ use std::ops::Deref; // Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is // our payment, which we can use to decode errors or inform the user that the payment was sent. +#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug +enum PendingHTLCRouting { + Forward { + onion_packet: msgs::OnionPacket, + short_channel_id: u64, // This should be NonZero eventually when we bump MSRV + }, + Receive { + payment_data: Option, + incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed + }, +} + #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug pub(super) struct PendingHTLCInfo { - onion_packet: Option, + routing: PendingHTLCRouting, incoming_shared_secret: [u8; 32], payment_hash: PaymentHash, - short_channel_id: u64, pub(super) amt_to_forward: u64, pub(super) outgoing_cltv_value: u32, } @@ -111,12 +122,23 @@ pub(super) struct HTLCPreviousHopData { incoming_packet_shared_secret: [u8; 32], } +struct ClaimableHTLC { + prev_hop: HTLCPreviousHopData, + value: u64, + /// Filled in when the HTLC was received with a payment_secret packet, which contains a + /// total_msat (which may differ from value if this is a Multi-Path Payment) and a + /// payment_secret which prevents path-probing attacks and can associate different HTLCs which + /// are part of the same payment. 
+ payment_data: Option, + cltv_expiry: u32, +} + /// Tracks the inbound corresponding to an outbound HTLC #[derive(Clone, PartialEq)] pub(super) enum HTLCSource { PreviousHopData(HTLCPreviousHopData), OutboundRoute { - route: Route, + path: Vec, session_priv: SecretKey, /// Technically we can recalculate this from the route, but we cache it here to avoid /// doing a double-pass on route when we get a failure back @@ -127,7 +149,7 @@ pub(super) enum HTLCSource { impl HTLCSource { pub fn dummy() -> Self { HTLCSource::OutboundRoute { - route: Route { hops: Vec::new() }, + path: Vec::new(), session_priv: SecretKey::from_slice(&[1; 32]).unwrap(), first_hop_htlc_msat: 0, } @@ -151,8 +173,11 @@ pub struct PaymentHash(pub [u8;32]); /// payment_preimage type, use to route payment between hop #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] pub struct PaymentPreimage(pub [u8;32]); +/// payment_secret type, use to authenticate sender to the receiver and tie MPP HTLCs together +#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)] +pub struct PaymentSecret(pub [u8;32]); -type ShutdownResult = (Vec, Vec<(HTLCSource, PaymentHash)>); +type ShutdownResult = (Option, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>); /// Error type returned across the channel_state mutex boundary. When an Err is generated for a /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel @@ -268,12 +293,12 @@ pub(super) struct ChannelHolder { /// guarantees are made about the existence of a channel with the short id here, nor the short /// ids in the PendingHTLCInfo! pub(super) forward_htlcs: HashMap>, - /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking things that were to us and - /// can be failed/claimed by the user + /// (payment_hash, payment_secret) -> Vec for tracking HTLCs that + /// were to us and can be failed/claimed by the user /// Note that while this is held in the same mutex as the channels themselves, no consistency /// guarantees are made about the channels given here actually existing anymore by the time you /// go to read them! - pub(super) claimable_htlcs: HashMap>, + claimable_htlcs: HashMap<(PaymentHash, Option), Vec>, /// Messages to send to peers - pushed to in the same lock that they are generated in (except /// for broadcast messages, where ordering isn't as strict). pub(super) pending_msg_events: Vec, @@ -368,6 +393,10 @@ pub struct ChannelManager>, our_network_key: SecretKey, + /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this + /// value increases strictly since we don't assume access to a time source. + last_node_announcement_serial: AtomicUsize, + /// The bulk of our storage will eventually be here (channels and message queues and the like). /// If we are connected to a peer we always at least have an entry here, even if no channels /// are currently open with that peer. @@ -417,15 +446,6 @@ const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_P #[allow(dead_code)] const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER; -macro_rules! 
secp_call { - ( $res: expr, $err: expr ) => { - match $res { - Ok(key) => key, - Err(_) => return Err($err), - } - }; -} - /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels pub struct ChannelDetails { /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes, @@ -462,22 +482,78 @@ pub struct ChannelDetails { pub is_live: bool, } +/// If a payment fails to send, it can be in one of several states. This enum is returned as the +/// Err() type describing which state the payment is in, see the description of individual enum +/// states for more. +#[derive(Debug)] +pub enum PaymentSendFailure { + /// A parameter which was passed to send_payment was invalid, preventing us from attempting to + /// send the payment at all. No channel state has been changed or messages sent to peers, and + /// once you've changed the parameter at error, you can freely retry the payment in full. + ParameterError(APIError), + /// A parameter in a single path which was passed to send_payment was invalid, preventing us + /// from attempting to send the payment at all. No channel state has been changed or messages + /// sent to peers, and once you've changed the parameter at error, you can freely retry the + /// payment in full. + /// + /// The results here are ordered the same as the paths in the route object which was passed to + /// send_payment. + PathParameterError(Vec>), + /// All paths which were attempted failed to send, with no channel state change taking place. + /// You can freely retry the payment in full (though you probably want to do so over different + /// paths than the ones selected). + AllFailedRetrySafe(Vec), + /// Some paths which were attempted failed to send, though possibly not all. At least some + /// paths have irrevocably committed to the HTLC and retrying the payment in full would result + /// in over-/re-payment. + /// + /// The results here are ordered the same as the paths in the route object which was passed to + /// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely + /// retried (though there is currently no API with which to do so). + /// + /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried + /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the + /// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel + /// with the latest update_id. + PartialFailure(Vec>), +} + macro_rules! handle_error { - ($self: ident, $internal: expr, $their_node_id: expr, $locked_channel_state: expr) => { + ($self: ident, $internal: expr, $their_node_id: expr) => { match $internal { Ok(msg) => Ok(msg), Err(MsgHandleErrInternal { err, shutdown_finish }) => { + #[cfg(debug_assertions)] + { + // In testing, ensure there are no deadlocks where the lock is already held upon + // entering the macro. 
+ assert!($self.channel_state.try_lock().is_ok()); + } + + let mut msg_events = Vec::with_capacity(2); + if let Some((shutdown_res, update_option)) = shutdown_finish { $self.finish_force_close_channel(shutdown_res); if let Some(update) = update_option { - $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } } + log_error!($self, "{}", err.err); if let msgs::ErrorAction::IgnoreError = err.action { - } else { $locked_channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: $their_node_id, action: err.action.clone() }); } + } else { + msg_events.push(events::MessageSendEvent::HandleError { + node_id: $their_node_id, + action: err.action.clone() + }); + } + + if !msg_events.is_empty() { + $self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events); + } + // Return error in case higher-API need one Err(err) }, @@ -498,8 +574,7 @@ macro_rules! break_chan_entry { if let Some(short_id) = chan.get_short_channel_id() { $channel_state.short_to_id.remove(&short_id); } - break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) - }, + break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok())) }, Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } } } @@ -518,7 +593,7 @@ macro_rules! try_chan_entry { if let Some(short_id) = chan.get_short_channel_id() { $channel_state.short_to_id.remove(&short_id); } - return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())) + return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok())) }, Err(ChannelError::CloseDelayBroadcast { msg, update }) => { log_error!($self, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg); @@ -536,11 +611,7 @@ macro_rules! try_chan_entry { ChannelMonitorUpdateErr::TemporaryFailure => {}, } } - let mut shutdown_res = chan.force_shutdown(); - if shutdown_res.0.len() >= 1 { - log_error!($self, "You have a toxic local commitment transaction {} avaible in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid()); - } - shutdown_res.0.clear(); + let shutdown_res = chan.force_shutdown(false); return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok())) } } @@ -568,7 +639,7 @@ macro_rules! handle_monitor_err { // splitting hairs we'd prefer to claim payments that were to us, but we haven't // given up the preimage yet, so might as well just wait until the payment is // retried, avoiding the on-chain fees. 
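[Editor's illustrative sketch] The reworked handle_error! above no longer takes a held channel_state lock from the caller: it buffers its MessageSendEvents in a local Vec and only acquires the lock at the very end, which is exactly what the new debug_assertions check guards against. A minimal sketch of that buffer-then-lock pattern, using std::sync::Mutex and plain Strings as stand-ins for the crate's event types (EventQueue and queue_events are illustrative names, not items from the crate):

    use std::sync::Mutex;

    struct EventQueue {
        pending_msg_events: Mutex<Vec<String>>,
    }

    impl EventQueue {
        // Build everything to be sent while holding no locks, then take the lock only long
        // enough to append. Callers must not hold the lock; the debug_assert catches that
        // in test builds, mirroring the cfg(debug_assertions) check in the macro above.
        fn queue_events(&self, err_to_report: Option<String>, update_to_broadcast: Option<String>) {
            debug_assert!(self.pending_msg_events.try_lock().is_ok());
            let mut msg_events = Vec::with_capacity(2);
            if let Some(update) = update_to_broadcast {
                msg_events.push(update);
            }
            if let Some(err) = err_to_report {
                msg_events.push(err);
            }
            if !msg_events.is_empty() {
                self.pending_msg_events.lock().unwrap().append(&mut msg_events);
            }
        }
    }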
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok())); + let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok())); res }, ChannelMonitorUpdateErr::TemporaryFailure => { @@ -665,6 +736,8 @@ impl ChannelMan }), our_network_key: keys_manager.get_node_secret(), + last_node_announcement_serial: AtomicUsize::new(0), + per_peer_state: RwLock::new(HashMap::new()), pending_events: Mutex::new(Vec::new()), @@ -814,14 +887,17 @@ impl ChannelMan #[inline] fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { - let (local_txn, mut failed_htlcs) = shutdown_res; - log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len()); + let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res; + log_trace!(self, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); } - for tx in local_txn { - log_trace!(self, "Broadcast onchain {}", log_tx!(tx)); - self.tx_broadcaster.broadcast_transaction(&tx); + if let Some(funding_txo) = funding_txo_option { + // There isn't anything we can do if we get an update failure - we're already + // force-closing. The monitor update on the required in-memory copy should broadcast + // the latest local state, which is the best we can do anyway. Thus, it is safe to + // ignore the result here. + let _ = self.monitor.update_monitor(funding_txo, monitor_update); } } @@ -843,7 +919,7 @@ impl ChannelMan } }; log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..])); - self.finish_force_close_channel(chan.force_shutdown()); + self.finish_force_close_channel(chan.force_shutdown(true)); if let Ok(update) = self.get_channel_update(&chan) { let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { @@ -975,15 +1051,23 @@ impl ChannelMan return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry)); } + let payment_data = match next_hop_data.format { + msgs::OnionHopDataFormat::Legacy { .. } => None, + msgs::OnionHopDataFormat::NonFinalNode { .. } => return_err!("Got non final data with an HMAC of 0", 0x4000 | 22, &[0;0]), + msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data, + }; + // Note that we could obviously respond immediately with an update_fulfill_htlc // message, however that would leak that we are the recipient of this payment, so // instead we stay symmetric with the forwarding case, only responding (after a // delay) once they've send us a commitment_signed! 
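[Editor's illustrative sketch] The PendingHTLCRouting enum introduced at the top of this patch replaces the old convention (onion_packet == None and short_channel_id == 0 meaning "this HTLC terminates at us") used by the receive branch constructed just below. A small self-contained sketch of how the forward_htlcs keying further down in the patch (short_channel_id for forwards, the reserved key 0 for receives) falls out of the enum; the types here are simplified stand-ins, not the crate's:

    // Simplified stand-ins for the crate's types, for illustration only.
    struct OnionPacket;
    enum PendingHTLCRouting {
        Forward { onion_packet: OnionPacket, short_channel_id: u64 },
        Receive { incoming_cltv_expiry: u32 },
    }

    // Forwards are queued under their outgoing channel; HTLCs we are to receive keep using
    // the reserved key 0, but the receive case is now explicit rather than implied by a
    // missing onion_packet.
    fn forward_htlcs_key(routing: &PendingHTLCRouting) -> u64 {
        match routing {
            PendingHTLCRouting::Forward { short_channel_id, .. } => *short_channel_id,
            PendingHTLCRouting::Receive { .. } => 0,
        }
    }

    fn main() {
        let fwd = PendingHTLCRouting::Forward { onion_packet: OnionPacket, short_channel_id: 42 };
        let recv = PendingHTLCRouting::Receive { incoming_cltv_expiry: 500_000 };
        assert_eq!(forward_htlcs_key(&fwd), 42);
        assert_eq!(forward_htlcs_key(&recv), 0);
    }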
PendingHTLCStatus::Forward(PendingHTLCInfo { - onion_packet: None, + routing: PendingHTLCRouting::Receive { + payment_data, + incoming_cltv_expiry: msg.cltv_expiry, + }, payment_hash: msg.payment_hash.clone(), - short_channel_id: 0, incoming_shared_secret: shared_secret, amt_to_forward: next_hop_data.amt_to_forward, outgoing_cltv_value: next_hop_data.outgoing_cltv_value, @@ -1027,15 +1111,17 @@ impl ChannelMan let short_channel_id = match next_hop_data.format { msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id, msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id, - msgs::OnionHopDataFormat::FinalNode => { + msgs::OnionHopDataFormat::FinalNode { .. } => { return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]); }, }; PendingHTLCStatus::Forward(PendingHTLCInfo { - onion_packet: Some(outgoing_packet), + routing: PendingHTLCRouting::Forward { + onion_packet: outgoing_packet, + short_channel_id: short_channel_id, + }, payment_hash: msg.payment_hash.clone(), - short_channel_id: short_channel_id, incoming_shared_secret: shared_secret, amt_to_forward: next_hop_data.amt_to_forward, outgoing_cltv_value: next_hop_data.outgoing_cltv_value, @@ -1043,8 +1129,11 @@ impl ChannelMan }; channel_state = Some(self.channel_state.lock().unwrap()); - if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info { - if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject them in the body here + if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info { + // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel + // with a short_channel_id of 0. This is important as various things later assume + // short_channel_id is non-0 in any ::Forward. + if let &PendingHTLCRouting::Forward { ref short_channel_id, .. 
} = routing { let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned(); let forwarding_id = match id_option { None => { // unknown_next_peer @@ -1074,8 +1163,9 @@ impl ChannelMan break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap()))); } let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; - // We want to have at least LATENCY_GRACE_PERIOD_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration - if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon + // Theoretically, channel counterparty shouldn't send us an HTLC expiring now, but we want to be robust wrt counterparty + // packet sanitization (see HTLC_FAIL_BACK_BUFFER rationale) + if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap()))); } if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far @@ -1118,7 +1208,7 @@ impl ChannelMan let unsigned = msgs::UnsignedChannelUpdate { chain_hash: self.genesis_hash, short_channel_id: short_channel_id, - timestamp: chan.get_channel_update_count(), + timestamp: chan.get_update_time_counter(), flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1), cltv_expiry_delta: CLTV_EXPIRY_DELTA, htlc_minimum_msat: chan.get_our_htlc_minimum_msat(), @@ -1136,61 +1226,24 @@ impl ChannelMan }) } - /// Sends a payment along a given route. - /// - /// Value parameters are provided via the last hop in route, see documentation for RouteHop - /// fields for more info. - /// - /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative - /// payment), we don't do anything to stop you! We always try to ensure that if the provided - /// next hop knows the preimage to payment_hash they can claim an additional amount as - /// specified in the last hop in the route! Thus, you should probably do your own - /// payment_preimage tracking (which you should already be doing as they represent "proof of - /// payment") and prevent double-sends yourself. - /// - /// May generate a SendHTLCs message event on success, which should be relayed. - /// - /// Raises APIError::RoutError when invalid route or forward parameter - /// (cltv_delta, fee, node public key) is specified. - /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates - /// (including due to previous monitor update failure or new permanent monitor update failure). - /// Raised APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the - /// relevant updates. - /// - /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed - /// and you may wish to retry via a different route immediately. - /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably - /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry - /// the payment via a different route unless you intend to pay twice!
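[Editor's illustrative sketch] From this point in the patch a Route is a set of paths rather than a single hop list, and the new send_payment below computes the payment's total value by summing each path's final-hop fee_msat (the diff does total_value += path.last().unwrap().fee_msat). A minimal sketch of that accounting with simplified stand-in types (the real RouteHop also carries pubkey, short_channel_id, cltv_expiry_delta, and more):

    // Simplified stand-ins; per the RouteHop documentation referenced above, the last hop's
    // fee_msat in each path is the amount delivered to the recipient over that path.
    struct RouteHop { fee_msat: u64 }
    struct Route { paths: Vec<Vec<RouteHop>> }

    fn total_value_msat(route: &Route) -> u64 {
        route.paths.iter()
            .map(|path| path.last().map(|hop| hop.fee_msat).unwrap_or(0))
            .sum()
    }

    fn main() {
        // Two-path (MPP) payment: 60k msat over one path, 40k over another.
        let route = Route { paths: vec![
            vec![RouteHop { fee_msat: 60_000 }],
            vec![RouteHop { fee_msat: 40_000 }],
        ]};
        assert_eq!(total_value_msat(&route), 100_000);
    }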
- pub fn send_payment(&self, route: Route, payment_hash: PaymentHash) -> Result<(), APIError> { - if route.hops.len() < 1 || route.hops.len() > 20 { - return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"}); - } - let our_node_id = self.get_our_node_id(); - for (idx, hop) in route.hops.iter().enumerate() { - if idx != route.hops.len() - 1 && hop.pubkey == our_node_id { - return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"}); - } - } - + // Only public for testing, this should otherwise never be called direcly + pub(crate) fn send_payment_along_path(&self, path: &Vec, payment_hash: &PaymentHash, payment_secret: &Option, total_value: u64, cur_height: u32) -> Result<(), APIError> { + log_trace!(self, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id); let (session_priv, prng_seed) = self.keys_manager.get_onion_rand(); - let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; - - let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv), - APIError::RouteError{err: "Pubkey along hop was maliciously selected"}); - let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?; + let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv) + .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?; + let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?; if onion_utils::route_size_insane(&onion_payloads) { return Err(APIError::RouteError{err: "Route size too large considering onion data"}); } - let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash); + let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash); let _ = self.total_consistency_lock.read().unwrap(); - let mut channel_lock = self.channel_state.lock().unwrap(); let err: Result<(), _> = loop { - - let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) { + let mut channel_lock = self.channel_state.lock().unwrap(); + let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) { None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}), Some(id) => id.clone(), }; @@ -1198,14 +1251,14 @@ impl ChannelMan let channel_state = &mut *channel_lock; if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) { match { - if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey { + if chan.get().get_their_node_id() != path.first().unwrap().pubkey { return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}); } if !chan.get().is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"}); } break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { - route: route.clone(), + path: path.clone(), session_priv: session_priv.clone(), first_hop_htlc_msat: htlc_msat, }, onion_packet), channel_state, chan) @@ -1214,14 +1267,15 @@ impl ChannelMan if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, 
false, true); // Note that MonitorUpdateFailed here indicates (per function docs) - // that we will resent the commitment update once we unfree monitor - // updating, so we have to take special care that we don't return - // something else in case we will resend later! + // that we will resend the commitment update once monitor updating + // is restored. Therefore, we must return an error indicating that + // it is unsafe to retry the payment wholesale, which we do in the + // send_payment check for MonitorUpdateFailed, below. return Err(APIError::MonitorUpdateFailed); } channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: route.hops.first().unwrap().pubkey, + node_id: path.first().unwrap().pubkey, updates: msgs::CommitmentUpdate { update_add_htlcs: vec![update_add], update_fulfill_htlcs: Vec::new(), @@ -1238,9 +1292,108 @@ impl ChannelMan return Ok(()); }; - match handle_error!(self, err, route.hops.first().unwrap().pubkey, channel_lock) { + match handle_error!(self, err, path.first().unwrap().pubkey) { Ok(_) => unreachable!(), - Err(e) => { Err(APIError::ChannelUnavailable { err: e.err }) } + Err(e) => { + Err(APIError::ChannelUnavailable { err: e.err }) + }, + } + } + + /// Sends a payment along a given route. + /// + /// Value parameters are provided via the last hop in route, see documentation for RouteHop + /// fields for more info. + /// + /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative + /// payment), we don't do anything to stop you! We always try to ensure that if the provided + /// next hop knows the preimage to payment_hash they can claim an additional amount as + /// specified in the last hop in the route! Thus, you should probably do your own + /// payment_preimage tracking (which you should already be doing as they represent "proof of + /// payment") and prevent double-sends yourself. + /// + /// May generate SendHTLCs message(s) event on success, which should be relayed. + /// + /// Each path may have a different return value, and PaymentSendFailure may contain a Vec with + /// each entry matching the corresponding-index entry in the route paths, see + /// PaymentSendFailure for more info. + /// + /// In general, a path may raise: + /// * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee, + /// node public key) is specified. + /// * APIError::ChannelUnavailable if the next-hop channel is not available for updates + /// (including due to previous monitor update failure or new permanent monitor update + /// failure). + /// * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the + /// relevant updates. + /// + /// Note that depending on the type of the PaymentSendFailure the HTLC may have been + /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a + /// different route unless you intend to pay twice! + /// + /// payment_secret is unrelated to payment_hash (or PaymentPreimage) and exists to authenticate + /// the sender to the recipient and prevent payment-probing (deanonymization) attacks. For + /// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route + /// must not contain multiple paths as multi-path payments require a recipient-provided + /// payment_secret. + /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature + /// bit set (either as required or as available).
If multiple paths are present in the Route, + /// we assume the invoice had the basic_mpp feature set. + pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option) -> Result<(), PaymentSendFailure> { + if route.paths.len() < 1 { + return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"})); + } + if route.paths.len() > 10 { + // This limit is completely arbitrary - there aren't any real fundamental path-count + // limits. After we support retrying individual paths we should likely bump this, but + // for now more than 10 paths likely carries too much one-path failure. + return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"})); + } + let mut total_value = 0; + let our_node_id = self.get_our_node_id(); + let mut path_errs = Vec::with_capacity(route.paths.len()); + 'path_check: for path in route.paths.iter() { + if path.len() < 1 || path.len() > 20 { + path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"})); + continue 'path_check; + } + for (idx, hop) in path.iter().enumerate() { + if idx != path.len() - 1 && hop.pubkey == our_node_id { + path_errs.push(Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"})); + continue 'path_check; + } + } + total_value += path.last().unwrap().fee_msat; + path_errs.push(Ok(())); + } + if path_errs.iter().any(|e| e.is_err()) { + return Err(PaymentSendFailure::PathParameterError(path_errs)); + } + + let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1; + let mut results = Vec::new(); + for path in route.paths.iter() { + results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height)); + } + let mut has_ok = false; + let mut has_err = false; + for res in results.iter() { + if res.is_ok() { has_ok = true; } + if res.is_err() { has_err = true; } + if let &Err(APIError::MonitorUpdateFailed) = res { + // MonitorUpdateFailed is inherently unsafe to retry, so we call it a + // PartialFailure. 
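[Editor's illustrative sketch] The has_ok/has_err bookkeeping built in this loop is what selects the PaymentSendFailure variant the caller of send_payment sees, and that variant encodes whether a wholesale retry is safe. A sketch of how calling code might react, assuming the PaymentSendFailure enum introduced above under the crate layout at this revision; RetryDecision and classify are caller-side names, not part of the crate:

    use lightning::ln::channelmanager::PaymentSendFailure;

    // Caller-side policy type; not part of the crate.
    enum RetryDecision { RetryInFull, FixParametersFirst, DoNotRetryInFull }

    fn classify(res: Result<(), PaymentSendFailure>) -> RetryDecision {
        match res {
            // The payment (or at least one path of it) is in flight; re-sending in full risks paying twice.
            Ok(()) => RetryDecision::DoNotRetryInFull,
            // Nothing was sent and no channel state changed; fix the offending parameter and retry.
            Err(PaymentSendFailure::ParameterError(_)) |
            Err(PaymentSendFailure::PathParameterError(_)) => RetryDecision::FixParametersFirst,
            // Every path failed without committing channel state, so the full amount may be re-sent.
            Err(PaymentSendFailure::AllFailedRetrySafe(_)) => RetryDecision::RetryInFull,
            // Some paths are already irrevocably committed; only the failed, retry-safe paths could
            // be re-sent, and per the enum docs there is currently no API to do so.
            Err(PaymentSendFailure::PartialFailure(_)) => RetryDecision::DoNotRetryInFull,
        }
    }

    fn main() { let _ = classify(Ok(())); }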
+ has_err = true; + has_ok = true; + break; + } + } + if has_err && has_ok { + Err(PaymentSendFailure::PartialFailure(results)) + } else if has_err { + Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect())) + } else { + Ok(()) } } @@ -1256,47 +1409,24 @@ impl ChannelMan pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) { let _ = self.total_consistency_lock.read().unwrap(); - let (mut chan, msg, chan_monitor) = { - let mut channel_state = self.channel_state.lock().unwrap(); - let (res, chan) = match channel_state.by_id.remove(temporary_channel_id) { + let (chan, msg) = { + let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) { Some(mut chan) => { (chan.get_outbound_funding_created(funding_txo) .map_err(|e| if let ChannelError::Close(msg) = e { - MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None) + MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None) } else { unreachable!(); }) , chan) }, None => return }; - match handle_error!(self, res, chan.get_their_node_id(), channel_state) { + match handle_error!(self, res, chan.get_their_node_id()) { Ok(funding_msg) => { - (chan, funding_msg.0, funding_msg.1) + (chan, funding_msg) }, Err(_) => { return; } } }; - // Because we have exclusive ownership of the channel here we can release the channel_state - // lock before add_monitor - if let Err(e) = self.monitor.add_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) { - match e { - ChannelMonitorUpdateErr::PermanentFailure => { - { - let mut channel_state = self.channel_state.lock().unwrap(); - match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None)), chan.get_their_node_id(), channel_state) { - Err(_) => { return; }, - Ok(()) => unreachable!(), - } - } - }, - ChannelMonitorUpdateErr::TemporaryFailure => { - // Its completely fine to continue with a FundingCreated until the monitor - // update is persisted, as long as we don't generate the FundingBroadcastSafe - // until the monitor has been safely persisted (as funding broadcast is not, - // in fact, safe). - chan.monitor_update_failed(false, false, Vec::new(), Vec::new()); - }, - } - } let mut channel_state = self.channel_state.lock().unwrap(); channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { @@ -1334,6 +1464,57 @@ impl ChannelMan }) } + #[allow(dead_code)] + // Messages of up to 64KB should never end up more than half full with addresses, as that would + // be absurd. We ensure this by checking that at least 500 (our stated public contract on when + // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB + // message... + const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (msgs::NetAddress::MAX_LEN as u32 + 1) / 2; + #[deny(const_err)] + #[allow(dead_code)] + // ...by failing to compile if the number of addresses that would be half of a message is + // smaller than 500: + const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500; + + /// Generates a signed node_announcement from the given arguments and creates a + /// BroadcastNodeAnnouncement event. Note that such messages will be ignored unless peers have + /// seen a channel_announcement from us (ie unless we have public channels open). 
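[Editor's illustrative sketch] The HALF_MESSAGE_IS_ADDRS / STATIC_ASSERT pair above is a poor-man's compile-time assertion: if the stated 500-address public contract ever stopped fitting in half of a 64KB message, the subtraction would go below zero and the const would fail to evaluate. A freestanding sketch of the same trick with purely illustrative numbers (on the compilers this patch targeted, #[deny(const_err)] promotes the overflow warning to an error; on current Rust an out-of-range evaluated const is an error by itself):

    // Compiles only while MAX_ITEMS_SUPPORTED >= ITEMS_PROMISED_IN_DOCS; otherwise the u32
    // subtraction underflows during constant evaluation and the build fails.
    const MAX_ITEMS_SUPPORTED: u32 = 800;
    const ITEMS_PROMISED_IN_DOCS: u32 = 500;
    #[deny(const_err)]
    #[allow(dead_code)]
    const STATIC_ASSERT: u32 = MAX_ITEMS_SUPPORTED - ITEMS_PROMISED_IN_DOCS;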
+ /// + /// RGB is a node "color" and alias is a printable human-readable string to describe this node + /// to humans. They carry no in-protocol meaning. + /// + /// addresses represent the set (possibly empty) of socket addresses on which this node accepts + /// incoming connections. These will be broadcast to the network, publicly tying these + /// addresses together. If you wish to preserve user privacy, addresses should likely contain + /// only Tor Onion addresses. + /// + /// Panics if addresses is absurdly large (more than 500). + pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec) { + let _ = self.total_consistency_lock.read().unwrap(); + + if addresses.len() > 500 { + panic!("More than half the message size was taken up by public addresses!"); + } + + let announcement = msgs::UnsignedNodeAnnouncement { + features: NodeFeatures::supported(), + timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32, + node_id: self.get_our_node_id(), + rgb, alias, addresses, + excess_address_data: Vec::new(), + excess_data: Vec::new(), + }; + let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]); + + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement { + msg: msgs::NodeAnnouncement { + signature: self.secp_ctx.sign(&msghash, &self.our_network_key), + contents: announcement + }, + }); + } + /// Processes HTLCs which are pending waiting on random forward delay. /// /// Should only really ever be called in response to a PendingHTLCsForwardable event. @@ -1362,7 +1543,9 @@ impl ChannelMan htlc_id: prev_htlc_id, incoming_packet_shared_secret: forward_info.incoming_shared_secret, }); - failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None)); + failed_forwards.push((htlc_source, forward_info.payment_hash, + HTLCFailReason::Reason { failure_code: 0x4000 | 10, data: Vec::new() } + )); }, HTLCForwardInfo::FailHTLC { .. } => { // Channel went away before we could fail it. This implies @@ -1380,22 +1563,27 @@ impl ChannelMan let mut fail_htlc_msgs = Vec::new(); for forward_info in pending_forwards.drain(..) { match forward_info { - HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => { - log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id); + HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { + routing: PendingHTLCRouting::Forward { + onion_packet, .. 
+ }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, } => { + log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(payment_hash.0), prev_short_channel_id, short_chan_id); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, htlc_id: prev_htlc_id, - incoming_packet_shared_secret: forward_info.incoming_shared_secret, + incoming_packet_shared_secret: incoming_shared_secret, }); - match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) { + match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet) { Err(e) => { if let ChannelError::Ignore(msg) = e { - log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg); + log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg); } else { panic!("Stated return value requirements in send_htlc() were not met"); } let chan_update = self.get_channel_update(chan.get()).unwrap(); - failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update))); + failed_forwards.push((htlc_source, payment_hash, + HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.encode_with_len() } + )); continue; }, Ok(update_add) => { @@ -1414,6 +1602,9 @@ impl ChannelMan } } }, + HTLCForwardInfo::AddHTLC { .. } => { + panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); + }, HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id); match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) { @@ -1461,14 +1652,12 @@ impl ChannelMan if let Some(short_id) = channel.get_short_channel_id() { channel_state.short_to_id.remove(&short_id); } - Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(), self.get_channel_update(&channel).ok())) + Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(true), self.get_channel_update(&channel).ok())) }, ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); } }; - match handle_error!(self, err, their_node_id, channel_state) { - Ok(_) => unreachable!(), - Err(_) => { continue; }, - } + handle_errors.push((their_node_id, err)); + continue; } }; if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { @@ -1493,20 +1682,68 @@ impl ChannelMan } else { for forward_info in pending_forwards.drain(..) { match forward_info { - HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => { - let prev_hop_data = HTLCPreviousHopData { + HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { + routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry }, + incoming_shared_secret, payment_hash, amt_to_forward, .. 
}, } => { + let prev_hop = HTLCPreviousHopData { short_channel_id: prev_short_channel_id, htlc_id: prev_htlc_id, - incoming_packet_shared_secret: forward_info.incoming_shared_secret, - }; - match channel_state.claimable_htlcs.entry(forward_info.payment_hash) { - hash_map::Entry::Occupied(mut entry) => entry.get_mut().push((forward_info.amt_to_forward, prev_hop_data)), - hash_map::Entry::Vacant(entry) => { entry.insert(vec![(forward_info.amt_to_forward, prev_hop_data)]); }, + incoming_packet_shared_secret: incoming_shared_secret, }; - new_events.push(events::Event::PaymentReceived { - payment_hash: forward_info.payment_hash, - amt: forward_info.amt_to_forward, + + let mut total_value = 0; + let payment_secret_opt = + if let &Some(ref data) = &payment_data { Some(data.payment_secret.clone()) } else { None }; + let htlcs = channel_state.claimable_htlcs.entry((payment_hash, payment_secret_opt)) + .or_insert(Vec::new()); + htlcs.push(ClaimableHTLC { + prev_hop, + value: amt_to_forward, + payment_data: payment_data.clone(), + cltv_expiry: incoming_cltv_expiry, }); + if let &Some(ref data) = &payment_data { + for htlc in htlcs.iter() { + total_value += htlc.value; + if htlc.payment_data.as_ref().unwrap().total_msat != data.total_msat { + total_value = msgs::MAX_VALUE_MSAT; + } + if total_value >= msgs::MAX_VALUE_MSAT { break; } + } + if total_value >= msgs::MAX_VALUE_MSAT || total_value > data.total_msat { + for htlc in htlcs.iter() { + let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); + htlc_msat_height_data.extend_from_slice( + &byte_utils::be32_to_array( + self.latest_block_height.load(Ordering::Acquire) + as u32, + ), + ); + failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: htlc.prev_hop.short_channel_id, + htlc_id: htlc.prev_hop.htlc_id, + incoming_packet_shared_secret: htlc.prev_hop.incoming_packet_shared_secret, + }), payment_hash, + HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data } + )); + } + } else if total_value == data.total_msat { + new_events.push(events::Event::PaymentReceived { + payment_hash: payment_hash, + payment_secret: Some(data.payment_secret), + amt: total_value, + }); + } + } else { + new_events.push(events::Event::PaymentReceived { + payment_hash: payment_hash, + payment_secret: None, + amt: amt_to_forward, + }); + } + }, + HTLCForwardInfo::AddHTLC { .. } => { + panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive"); }, HTLCForwardInfo::FailHTLC { .. } => { panic!("Got pending fail of our own HTLC"); @@ -1517,18 +1754,12 @@ impl ChannelMan } } - for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) { - match update { - None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }), - Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }), - }; + for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason); } - if handle_errors.len() > 0 { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - for (their_node_id, err) in handle_errors.drain(..) 
{ - let _ = handle_error!(self, err, their_node_id, channel_state_lock); - } + for (their_node_id, err) in handle_errors.drain(..) { + let _ = handle_error!(self, err, their_node_id); } if new_events.is_empty() { return } @@ -1566,17 +1797,21 @@ impl ChannelMan /// along the path (including in our own channel on which we received it). /// Returns false if no payment was found to fail backwards, true if the process of failing the /// HTLC backwards has been started. - pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool { + pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, payment_secret: &Option) -> bool { let _ = self.total_consistency_lock.read().unwrap(); let mut channel_state = Some(self.channel_state.lock().unwrap()); - let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash); + let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(*payment_hash, *payment_secret)); if let Some(mut sources) = removed_source { - for (recvd_value, htlc_with_hash) in sources.drain(..) { + for htlc in sources.drain(..) { if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } + let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); + htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( + self.latest_block_height.load(Ordering::Acquire) as u32, + )); self.fail_htlc_backwards_internal(channel_state.take().unwrap(), - HTLCSource::PreviousHopData(htlc_with_hash), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(recvd_value).to_vec() }); + HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash, + HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }); } true } else { false } @@ -1594,15 +1829,15 @@ impl ChannelMan //between the branches here. We should make this async and move it into the forward HTLCs //timer handling. match source { - HTLCSource::OutboundRoute { ref route, .. } => { + HTLCSource::OutboundRoute { ref path, .. } => { log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0)); mem::drop(channel_state_lock); match &onion_error { &HTLCFailReason::LightningError { ref err } => { #[cfg(test)] - let (channel_update, payment_retryable, onion_error_code) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); #[cfg(not(test))] - let (channel_update, payment_retryable, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); + let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone()); // TODO: If we decided to blame ourselves (or one of our channels) in // process_onion_failure we should close that channel as it implies our // next-hop is needlessly blaming us! @@ -1618,13 +1853,17 @@ impl ChannelMan payment_hash: payment_hash.clone(), rejected_by_dest: !payment_retryable, #[cfg(test)] - error_code: onion_error_code + error_code: onion_error_code, +#[cfg(test)] + error_data: onion_error_data } ); }, &HTLCFailReason::Reason { #[cfg(test)] ref failure_code, +#[cfg(test)] + ref data, .. 
} => { // we get a fail_malformed_htlc from the first hop // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary @@ -1636,9 +1875,11 @@ impl ChannelMan self.pending_events.lock().unwrap().push( events::Event::PaymentFailed { payment_hash: payment_hash.clone(), - rejected_by_dest: route.hops.len() == 1, + rejected_by_dest: path.len() == 1, #[cfg(test)] error_code: Some(*failure_code), +#[cfg(test)] + error_data: Some(data.clone()), } ); } @@ -1691,96 +1932,170 @@ impl ChannelMan /// privacy-breaking recipient-probing attacks which may reveal payment activity to /// motivated attackers. /// + /// Note that the privacy concerns in (b) are not relevant in payments with a payment_secret + /// set. Thus, for such payments we will claim any payments which do not under-pay. + /// /// May panic if called except in response to a PaymentReceived event. - pub fn claim_funds(&self, payment_preimage: PaymentPreimage, expected_amount: u64) -> bool { + pub fn claim_funds(&self, payment_preimage: PaymentPreimage, payment_secret: &Option, expected_amount: u64) -> bool { let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); let _ = self.total_consistency_lock.read().unwrap(); let mut channel_state = Some(self.channel_state.lock().unwrap()); - let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash); + let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(payment_hash, *payment_secret)); if let Some(mut sources) = removed_source { - for (received_amount, htlc_with_hash) in sources.drain(..) { + assert!(!sources.is_empty()); + + // If we are claiming an MPP payment, we have to take special care to ensure that each + // channel exists before claiming all of the payments (inside one lock). + // Note that channel existance is sufficient as we should always get a monitor update + // which will take care of the real HTLC claim enforcement. + // + // If we find an HTLC which we would need to claim but for which we do not have a + // channel, we will fail all parts of the MPP payment. While we could wait and see if + // the sender retries the already-failed path(s), it should be a pretty rare case where + // we got all the HTLCs and then a channel closed while we were waiting for the user to + // provide the preimage, so worrying too much about the optimal handling isn't worth + // it. + + let (is_mpp, mut valid_mpp) = if let &Some(ref data) = &sources[0].payment_data { + assert!(payment_secret.is_some()); + (true, data.total_msat >= expected_amount) + } else { + assert!(payment_secret.is_none()); + (false, false) + }; + + for htlc in sources.iter() { + if !is_mpp || !valid_mpp { break; } + if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) { + valid_mpp = false; + } + } + + let mut errs = Vec::new(); + let mut claimed_any_htlcs = false; + for htlc in sources.drain(..) 
{ if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } - if received_amount < expected_amount || received_amount > expected_amount * 2 { - let mut htlc_msat_data = byte_utils::be64_to_array(received_amount).to_vec(); - let mut height_data = byte_utils::be32_to_array(self.latest_block_height.load(Ordering::Acquire) as u32).to_vec(); - htlc_msat_data.append(&mut height_data); + if (is_mpp && !valid_mpp) || (!is_mpp && (htlc.value < expected_amount || htlc.value > expected_amount * 2)) { + let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); + htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( + self.latest_block_height.load(Ordering::Acquire) as u32, + )); self.fail_htlc_backwards_internal(channel_state.take().unwrap(), - HTLCSource::PreviousHopData(htlc_with_hash), &payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_data }); + HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash, + HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }); } else { - self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage); + match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) { + Err(Some(e)) => { + if let msgs::ErrorAction::IgnoreError = e.1.err.action { + // We got a temporary failure updating monitor, but will claim the + // HTLC when the monitor updating is restored (or on chain). + log_error!(self, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err); + claimed_any_htlcs = true; + } else { errs.push(e); } + }, + Err(None) if is_mpp => unreachable!("We already checked for channel existence, we can't fail here!"), + Err(None) => { + log_warn!(self, "Channel we expected to claim an HTLC from was closed."); + }, + Ok(()) => claimed_any_htlcs = true, + } } } - true + + // Now that we've done the entire above loop in one lock, we can handle any errors + // which were generated. + channel_state.take(); + + for (their_node_id, err) in errs.drain(..) { + let res: Result<(), _> = Err(err); + let _ = handle_error!(self, res, their_node_id); + } + + claimed_any_htlcs } else { false } } - fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage) { - let (their_node_id, err) = loop { - match source { - HTLCSource::OutboundRoute { .. } => { - mem::drop(channel_state_lock); - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PaymentSent { - payment_preimage - }); - }, - HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => { - //TODO: Delay the claimed_funds relaying just like we do outbound relay! - let channel_state = &mut *channel_state_lock; - let chan_id = match channel_state.short_to_id.get(&short_channel_id) { - Some(chan_id) => chan_id.clone(), - None => { - // TODO: There is probably a channel manager somewhere that needs to - // learn the preimage as the channel already hit the chain and that's - // why it's missing. - return - } - }; + fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> { + //TODO: Delay the claimed_funds relaying just like we do outbound relay! 
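[Editor's illustrative sketch] claim_funds above now takes the payment_secret that arrived with the HTLC(s) alongside the preimage and the expected amount, and for payment_secret-carrying (potentially multi-path) payments it claims anything that does not under-pay. A self-contained sketch of the receive-side flow using stand-in types that mirror the signatures introduced in this patch; Node, on_payment_received, known_preimage and invoice_amt_msat are hypothetical application-side names, and the real calls live on ChannelManager with the crate's PaymentPreimage/PaymentSecret/PaymentHash newtypes:

    // Stand-ins mirroring the new signatures; not the crate's actual types.
    #[derive(Clone, Copy)] struct PaymentPreimage([u8; 32]);
    #[derive(Clone, Copy)] struct PaymentSecret([u8; 32]);
    #[derive(Clone, Copy)] struct PaymentHash([u8; 32]);

    struct Node;
    impl Node {
        // Mirrors ChannelManager::claim_funds(preimage, &payment_secret, expected_amount) -> bool.
        fn claim_funds(&self, _p: PaymentPreimage, _s: &Option<PaymentSecret>, _expected_amount: u64) -> bool { true }
        // Mirrors ChannelManager::fail_htlc_backwards(&payment_hash, &payment_secret) -> bool.
        fn fail_htlc_backwards(&self, _h: &PaymentHash, _s: &Option<PaymentSecret>) -> bool { true }
    }

    // On an Event::PaymentReceived { payment_hash, payment_secret, amt }, the application looks
    // up the invoice's preimage and amount, then either claims (passing the same payment_secret
    // back in) or fails the HTLCs backwards.
    fn on_payment_received(node: &Node, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>,
                           amt: u64, known_preimage: Option<PaymentPreimage>, invoice_amt_msat: u64) {
        match known_preimage {
            Some(preimage) if amt >= invoice_amt_msat => {
                // With a payment_secret set, over-payment is claimable; under-payment is not.
                let _ = node.claim_funds(preimage, &payment_secret, invoice_amt_msat);
            },
            _ => {
                let _ = node.fail_htlc_backwards(&payment_hash, &payment_secret);
            },
        }
    }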
+ let channel_state = &mut **channel_state_lock; + let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) { + Some(chan_id) => chan_id.clone(), + None => { + return Err(None) + } + }; - if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { - let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); - match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) { - Ok((msgs, monitor_option)) => { - if let Some(monitor_update) = monitor_option { - if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { - if was_frozen_for_monitor { - assert!(msgs.is_none()); - } else { - break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some())); - } - } - } - if let Some((msg, commitment_signed)) = msgs { - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_their_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: vec![msg], - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: None, - commitment_signed, - } - }); - } - }, - Err(_e) => { - // TODO: There is probably a channel manager somewhere that needs to - // learn the preimage as the channel may be about to hit the chain. - //TODO: Do something with e? - return - }, + if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) { + let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); + match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage) { + Ok((msgs, monitor_option)) => { + if let Some(monitor_update) = monitor_option { + if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) { + if was_frozen_for_monitor { + assert!(msgs.is_none()); + } else { + return Err(Some((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err()))); + } } - } else { unreachable!(); } + } + if let Some((msg, commitment_signed)) = msgs { + channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get().get_their_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: vec![msg], + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: None, + commitment_signed, + } + }); + } + return Ok(()) + }, + Err(e) => { + // TODO: Do something with e? + // This should only occur if we are claiming an HTLC at the same time as the + // HTLC is being failed (eg because a block is being connected and this caused + // an HTLC to time out). This should, of course, only occur if the user is the + // one doing the claiming (as it being a part of a peer claim would imply we're + // about to lose funds) and only if the lock in claim_funds was dropped as a + // previous HTLC was failed (thus not for an MPP payment). 
+ debug_assert!(false, "This shouldn't be reachable except in absurdly rare cases between monitor updates and HTLC timeouts: {:?}", e); + return Err(None) }, } - return; - }; + } else { unreachable!(); } + } - let _ = handle_error!(self, err, their_node_id, channel_state_lock); + fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_preimage: PaymentPreimage) { + match source { + HTLCSource::OutboundRoute { .. } => { + mem::drop(channel_state_lock); + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::PaymentSent { + payment_preimage + }); + }, + HTLCSource::PreviousHopData(hop_data) => { + if let Err((their_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) { + Ok(()) => Ok(()), + Err(None) => { + // TODO: There is probably a channel monitor somewhere that needs to + // learn the preimage as the channel already hit the chain and that's + // why it's missing. + Ok(()) + }, + Err(Some(res)) => Err(res), + } { + mem::drop(channel_state_lock); + let res: Result<(), _> = Err(err); + let _ = handle_error!(self, res, their_node_id); + } + }, + } } /// Gets the node_id held by this ChannelManager @@ -1957,14 +2272,14 @@ impl ChannelMan }; // Because we have exclusive ownership of the channel here we can release the channel_state // lock before add_monitor - if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) { + if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo(), monitor_update) { match e { ChannelMonitorUpdateErr::PermanentFailure => { // Note that we reply with the new channel_id in error messages if we gave up on the // channel, not the temporary_channel_id. This is compatible with ourselves, but the // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for // any messages referencing a previously-closed channel anyway. 
@@ -1957,14 +2272,14 @@ impl ChannelMan
 		};
 		// Because we have exclusive ownership of the channel here we can release the channel_state
 		// lock before add_monitor
-		if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
+		if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo(), monitor_update) {
 			match e {
 				ChannelMonitorUpdateErr::PermanentFailure => {
 					// Note that we reply with the new channel_id in error messages if we gave up on the
 					// channel, not the temporary_channel_id. This is compatible with ourselves, but the
 					// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
 					// any messages referencing a previously-closed channel anyway.
-					return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+					return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(true), None));
 				},
 				ChannelMonitorUpdateErr::TemporaryFailure => {
 					// There's no problem signing a counterparty's funding transaction if our monitor
@@ -2001,17 +2316,11 @@ impl ChannelMan
 				if chan.get().get_their_node_id() != *their_node_id {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				let monitor_update = match chan.get_mut().funding_signed(&msg) {
-					Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
-					Err((Some(monitor_update), e)) => {
-						assert!(chan.get().is_awaiting_monitor_update());
-						let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update);
-						try_chan_entry!(self, Err(e), channel_state, chan);
-						unreachable!();
-					},
+				let monitor = match chan.get_mut().funding_signed(&msg) {
 					Ok(update) => update,
+					Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
 				};
-				if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+				if let Err(e) = self.monitor.add_monitor(chan.get().get_funding_txo().unwrap(), monitor) {
 					return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
 				}
 				(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
@@ -2322,7 +2631,10 @@ impl ChannelMan
 					forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
 				}
 				for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
-					match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
+					match channel_state.forward_htlcs.entry(match forward_info.routing {
+						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
+						PendingHTLCRouting::Receive { .. } => 0,
+					}) {
 						hash_map::Entry::Occupied(mut entry) => {
 							entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
 						},
@@ -2524,9 +2836,9 @@ impl ChannelMan
 	#[doc(hidden)]
 	pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
 		let _ = self.total_consistency_lock.read().unwrap();
-		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let their_node_id;
 		let err: Result<(), _> = loop {
+			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;

 			match channel_state.by_id.entry(channel_id) {
@@ -2565,7 +2877,7 @@ impl ChannelMan
 			return Ok(())
 		};

-		match handle_error!(self, err, their_node_id, channel_state_lock) {
+		match handle_error!(self, err, their_node_id) {
 			Ok(_) => unreachable!(),
 			Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
 		}
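
(Aside: the update_fee hunk above takes the channel_state lock inside the error loop, so the MutexGuard has already been dropped by the time handle_error! runs. A stripped-down sketch of that shape, with entirely made-up types and names; only the lock scoping mirrors the change.)

use std::sync::Mutex;

struct Node { state: Mutex<Vec<u64>> }

impl Node {
	fn poke(&self, id: u64) -> Result<(), String> {
		let err: Result<(), String> = loop {
			// The guard lives only inside the loop body; it is dropped when we
			// `break` out (or `return`), so nothing below runs while locked.
			let mut state = self.state.lock().unwrap();
			if state.contains(&id) {
				break Err(format!("duplicate id {}", id));
			}
			state.push(id);
			return Ok(());
		};
		// Error handling happens with the lock released, mirroring how
		// handle_error! is now invoked without the channel_state guard.
		err
	}
}

Because the guard is scoped to the loop, the error path after it may take the same Mutex again, as handle_error! may need to, without self-deadlocking.
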
@@ -2644,29 +2956,39 @@ impl
+					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+						let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+						htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+							failure_code: 0x4000 | 15,
+							data: htlc_msat_height_data
+						}));
+						false
+					} else { true }
+				});
+				!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+			});
 		}
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
+
+		for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
+			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+		}
 		self.latest_block_height.store(height as usize, Ordering::Release);
 		*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
+		loop {
+			// Update last_node_announcement_serial to be the max of its current value and the
+			// block timestamp. This should keep us close to the current time without relying on
+			// having an explicit local time source.
+			// Just in case we end up in a race, we loop until we either successfully update
+			// last_node_announcement_serial or decide we don't need to.
+			let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire);
+			if old_serial >= header.time as usize { break; }
+			if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
+				break;
+			}
+		}
 	}

 	/// We force-close the channel without letting our counterparty participate in the shutdown
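
(Aside: the 0x4000 | 15 failure data built above is 12 bytes: the HTLC value in msat as a big-endian u64 followed by the block height as a big-endian u32. A sketch of the same layout using std's to_be_bytes() rather than the crate's byte_utils helpers; the function and test names are invented.)

// Sketch only: mirrors the htlc_msat_height_data construction above.
fn incorrect_payment_details_data(htlc_value_msat: u64, best_block_height: u32) -> Vec<u8> {
	let mut data = Vec::with_capacity(12);
	data.extend_from_slice(&htlc_value_msat.to_be_bytes()); // 8 bytes: amount in msat
	data.extend_from_slice(&best_block_height.to_be_bytes()); // 4 bytes: height
	data
}

#[test]
fn incorrect_payment_details_data_layout() {
	let data = incorrect_payment_details_data(100_000, 600_000);
	assert_eq!(data.len(), 12);
	assert_eq!(data[..8], 100_000u64.to_be_bytes()[..]);
	assert_eq!(data[8..], 600_000u32.to_be_bytes()[..]);
}
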
@@ -2735,7 +3091,7 @@ impl
 				node_id != their_node_id,
 				&events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
 				&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+				&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
 				&events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
 				&events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
 				&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
@@ -3052,10 +3345,20 @@ const MIN_SERIALIZATION_VERSION: u8 = 1;

 impl Writeable for PendingHTLCInfo {
 	fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
-		self.onion_packet.write(writer)?;
+		match &self.routing {
+			&PendingHTLCRouting::Forward { ref onion_packet, ref short_channel_id } => {
+				0u8.write(writer)?;
+				onion_packet.write(writer)?;
+				short_channel_id.write(writer)?;
+			},
+			&PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => {
+				1u8.write(writer)?;
+				payment_data.write(writer)?;
+				incoming_cltv_expiry.write(writer)?;
+			},
+		}
 		self.incoming_shared_secret.write(writer)?;
 		self.payment_hash.write(writer)?;
-		self.short_channel_id.write(writer)?;
 		self.amt_to_forward.write(writer)?;
 		self.outgoing_cltv_value.write(writer)?;
 		Ok(())
@@ -3065,10 +3368,19 @@ impl Writeable for PendingHTLCInfo {
 impl Readable for PendingHTLCInfo {
 	fn read(reader: &mut R) -> Result {
 		Ok(PendingHTLCInfo {
-			onion_packet: Readable::read(reader)?,
+			routing: match Readable::read(reader)? {
+				0u8 => PendingHTLCRouting::Forward {
+					onion_packet: Readable::read(reader)?,
+					short_channel_id: Readable::read(reader)?,
+				},
+				1u8 => PendingHTLCRouting::Receive {
+					payment_data: Readable::read(reader)?,
+					incoming_cltv_expiry: Readable::read(reader)?,
+				},
+				_ => return Err(DecodeError::InvalidValue),
+			},
 			incoming_shared_secret: Readable::read(reader)?,
 			payment_hash: Readable::read(reader)?,
-			short_channel_id: Readable::read(reader)?,
 			amt_to_forward: Readable::read(reader)?,
 			outgoing_cltv_value: Readable::read(reader)?,
 		})
@@ -3133,6 +3445,13 @@ impl_writeable!(HTLCPreviousHopData, 0, {
 	incoming_packet_shared_secret
 });

+impl_writeable!(ClaimableHTLC, 0, {
+	prev_hop,
+	value,
+	payment_data,
+	cltv_expiry
+});
+
 impl Writeable for HTLCSource {
 	fn write(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
 		match self {
@@ -3140,9 +3459,9 @@ impl Writeable for HTLCSource {
 				0u8.write(writer)?;
 				hop_data.write(writer)?;
 			},
-			&HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } => {
+			&HTLCSource::OutboundRoute { ref path, ref session_priv, ref first_hop_htlc_msat } => {
 				1u8.write(writer)?;
-				route.write(writer)?;
+				path.write(writer)?;
 				session_priv.write(writer)?;
 				first_hop_htlc_msat.write(writer)?;
 			}
@@ -3156,7 +3475,7 @@ impl Readable for HTLCSource {
 		match ::read(reader)? {
 			0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
 			1 => Ok(HTLCSource::OutboundRoute {
-				route: Readable::read(reader)?,
+				path: Readable::read(reader)?,
 				session_priv: Readable::read(reader)?,
 				first_hop_htlc_msat: Readable::read(reader)?,
 			}),
@@ -3274,9 +3593,8 @@ impl
+				channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
+			// If the channel is ahead of the monitor, return InvalidValue:
+			return Err(DecodeError::InvalidValue);
+		} else if channel.get_cur_local_commitment_transaction_number() > monitor.get_cur_local_commitment_number() ||
+				channel.get_revoked_remote_commitment_transaction_number() > monitor.get_min_seen_secret() ||
+				channel.get_cur_remote_commitment_transaction_number() > monitor.get_cur_remote_commitment_number() ||
+				channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
+			// But if the channel is behind of the monitor, close the channel:
+			let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+			failed_htlcs.append(&mut new_failed_htlcs);
+			monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
 		} else {
 			if let Some(short_channel_id) = channel.get_short_channel_id() {
 				short_to_id.insert(short_channel_id, channel.channel_id());
@@ -3421,7 +3748,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 	for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
 		if !funding_txo_set.contains(funding_txo) {
-			closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
+			monitor.broadcast_latest_local_commitment_txn(&args.tx_broadcaster);
 		}
 	}

@@ -3444,7 +3771,7 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 			let previous_hops_len: u64 = Readable::read(reader)?;
 			let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
 			for _ in 0..previous_hops_len {
-				previous_hops.push((Readable::read(reader)?, Readable::read(reader)?));
+				previous_hops.push(Readable::read(reader)?);
 			}
 			claimable_htlcs.insert(payment_hash, previous_hops);
 		}
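
(Aside: the PendingHTLCRouting hunks above frame each variant as a discriminant byte, 0 for Forward and 1 for Receive, followed by that variant's fields, with unknown bytes rejected as InvalidValue. A standalone sketch of the same framing over std::io; the Routing enum and helper names are invented, and fixed-width integers stand in for the crate's Writeable/Readable impls.)

use std::io::{self, Read, Write};

enum Routing {
	Forward { short_channel_id: u64 },
	Receive { incoming_cltv_expiry: u32 },
}

fn write_routing<W: Write>(w: &mut W, r: &Routing) -> io::Result<()> {
	match r {
		Routing::Forward { short_channel_id } => {
			w.write_all(&[0u8])?;
			w.write_all(&short_channel_id.to_be_bytes())
		},
		Routing::Receive { incoming_cltv_expiry } => {
			w.write_all(&[1u8])?;
			w.write_all(&incoming_cltv_expiry.to_be_bytes())
		},
	}
}

fn read_routing<R: Read>(r: &mut R) -> io::Result<Routing> {
	let mut disc = [0u8; 1];
	r.read_exact(&mut disc)?;
	match disc[0] {
		0 => {
			let mut buf = [0u8; 8];
			r.read_exact(&mut buf)?;
			Ok(Routing::Forward { short_channel_id: u64::from_be_bytes(buf) })
		},
		1 => {
			let mut buf = [0u8; 4];
			r.read_exact(&mut buf)?;
			Ok(Routing::Receive { incoming_cltv_expiry: u32::from_be_bytes(buf) })
		},
		// Unknown discriminants are a hard decode error, mirroring
		// DecodeError::InvalidValue in the hunk above.
		_ => Err(io::Error::new(io::ErrorKind::InvalidData, "unknown variant byte")),
	}
}
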
@@ -3459,6 +3786,8 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 		per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
 	}

+	let last_node_announcement_serial: u32 = Readable::read(reader)?;
+
 	let channel_manager = ChannelManager {
 		genesis_hash,
 		fee_estimator: args.fee_estimator,
@@ -3478,6 +3807,8 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 		}),
 		our_network_key: args.keys_manager.get_node_secret(),

+		last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
+
 		per_peer_state: RwLock::new(per_peer_state),

 		pending_events: Mutex::new(Vec::new()),
@@ -3487,12 +3818,13 @@ impl<'a, ChanSigner: ChannelKeys + Readable, M: Deref, T: Deref, K: Deref, F: De
 		default_configuration: args.default_config,
 	};

-	for close_res in closed_channels.drain(..) {
-		channel_manager.finish_force_close_channel(close_res);
-		//TODO: Broadcast channel update for closed channels, but only after we've made a
-		//connection or two.
+	for htlc_source in failed_htlcs.drain(..) {
+		channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 	}

+	//TODO: Broadcast channel update for closed channels, but only after we've made a
+	//connection or two.
+
 	Ok((last_block_hash.clone(), channel_manager))
 }
 }
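
(Aside: the two onion failure codes used in this patch decompose, per BOLT 4, into the PERM flag 0x4000 plus a code number: 0x4000 | 15 is incorrect_or_unknown_payment_details, historically unknown_payment_hash, used when timing out claimable HTLCs in block_connected, and 0x4000 | 8 is permanent_channel_failure, used when failing back HTLCs from channels closed during deserialization. The constants below are defined here only for illustration; they are not part of this crate.)

// Illustrative only: BOLT 4 onion failure-code flag and the two codes this patch uses.
const PERM: u16 = 0x4000;
const INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS: u16 = PERM | 15; // 0x400f
const PERMANENT_CHANNEL_FAILURE: u16 = PERM | 8;             // 0x4008

fn main() {
	assert_eq!(INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS, 0x4000 | 15);
	assert_eq!(PERMANENT_CHANNEL_FAILURE, 0x4000 | 8);
}
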