X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=8086fe3a9601b01d74ee36829fd1c13af659ec40;hb=795aff8da5b79dee1bea8234479cdf0ff5c98118;hp=c59560944d8bc6bb02be2565dee22a89e22d040b;hpb=fae46a02e38a1d77b7d575bae4d88dca63beda8b;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index c5956094..8086fe3a 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -28,7 +28,7 @@ use secp256k1;
 use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
 use chain::transaction::OutPoint;
 use ln::channel::{Channel, ChannelError};
-use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use ln::features::{InitFeatures, NodeFeatures};
 use ln::router::{Route, RouteHop};
 use ln::msgs;
@@ -76,6 +76,7 @@ enum PendingHTLCRouting {
 	},
 	Receive {
 		payment_data: Option<msgs::FinalOnionHopData>,
+		incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
 	},
 }
@@ -129,6 +130,7 @@ struct ClaimableHTLC {
 	/// payment_secret which prevents path-probing attacks and can associate different HTLCs which
 	/// are part of the same payment.
 	payment_data: Option<msgs::FinalOnionHopData>,
+	cltv_expiry: u32,
 }
 
 /// Tracks the inbound corresponding to an outbound HTLC
@@ -296,8 +298,6 @@ pub(super) struct ChannelHolder {
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about the channels given here actually existing anymore by the time you
 	/// go to read them!
-	/// TODO: We need to time out HTLCs sitting here which are waiting on other AMP HTLCs to
-	/// arrive.
 	claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
 	/// Messages to send to peers - pushed to in the same lock that they are generated in (except
 	/// for broadcast messages, where ordering isn't as strict).
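
Taken together, the two added fields record, for every pending inbound HTLC, the height by which it must be claimed, which is what lets the old TODO about timing out unclaimed (AMP) HTLCs be dropped: the timeout can now be enforced on each connected block (see the hunk at -2977 further down). A toy sketch of that expiry scan, using simplified stand-in types rather than the real ClaimableHTLC map:

    use std::collections::HashMap;

    // Simplified stand-ins: the real map is keyed by (PaymentHash, Option<PaymentSecret>)
    // and holds full ClaimableHTLC entries.
    struct PendingHtlc { amt_msat: u64, cltv_expiry: u32 }

    /// Drop any pending inbound HTLCs whose expiry is within `buffer` blocks of `height`,
    /// returning the ones that must now be failed back to the previous hop.
    fn expire_unclaimed(
        claimable: &mut HashMap<[u8; 32], Vec<PendingHtlc>>,
        height: u32,
        buffer: u32,
    ) -> Vec<([u8; 32], u64)> {
        let mut timed_out = Vec::new();
        claimable.retain(|payment_hash, htlcs| {
            htlcs.retain(|htlc| {
                if height >= htlc.cltv_expiry.saturating_sub(buffer) {
                    timed_out.push((*payment_hash, htlc.amt_msat));
                    false // remove: this HTLC will be failed back
                } else {
                    true
                }
            });
            !htlcs.is_empty() // drop map entries with no HTLCs left
        });
        timed_out
    }

    fn main() {
        let mut map = HashMap::new();
        map.insert([0u8; 32], vec![PendingHtlc { amt_msat: 1_000, cltv_expiry: 110 }]);
        assert_eq!(expire_unclaimed(&mut map, 105, 6).len(), 1);
        assert!(map.is_empty());
    }
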
@@ -1063,7 +1063,10 @@ impl ChannelMan
 					// delay) once they've sent us a commitment_signed!
 					PendingHTLCStatus::Forward(PendingHTLCInfo {
-						routing: PendingHTLCRouting::Receive { payment_data },
+						routing: PendingHTLCRouting::Receive {
+							payment_data,
+							incoming_cltv_expiry: msg.cltv_expiry,
+						},
 						payment_hash: msg.payment_hash.clone(),
 						incoming_shared_secret: shared_secret,
 						amt_to_forward: next_hop_data.amt_to_forward,
@@ -1160,8 +1163,9 @@ impl ChannelMan
 					break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
 				}
 				let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
-				// We want to have at least LATENCY_GRACE_PERIOD_BLOCKS to fail prior to going on chain CLAIM_BUFFER blocks before expiration
-				if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
+				// Theoretically, our channel counterparty shouldn't send us an HTLC expiring now, but we want to be robust wrt counterparty
+				// packet sanitization (see the HTLC_FAIL_BACK_BUFFER rationale)
+				if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
 					break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
 				}
 				if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
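
The reworked expiry_too_soon check replaces the CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS sum with the single HTLC_FAIL_BACK_BUFFER constant. A self-contained sketch of the comparison is below; the constant's value and the helper name are placeholders chosen for illustration, not taken from the diff.

    // Illustrative only: the buffer value below is a placeholder, not the crate's constant.
    const HTLC_FAIL_BACK_BUFFER: u32 = 72;

    /// Returns true when an incoming HTLC expires too close to the chain tip to be
    /// accepted (the 0x1000 | 14 "expiry_too_soon" case in the hunk above).
    fn expiry_too_soon(cltv_expiry: u32, best_block_height: u32) -> bool {
        let cur_height = best_block_height + 1; // height of the next block, as in the diff
        cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER
    }

    fn main() {
        assert!(expiry_too_soon(100, 95));
        assert!(!expiry_too_soon(1_000, 95));
    }
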
@@ -1222,6 +1226,80 @@ impl ChannelMan
 	})
 	}
+	// Only public for testing, this should otherwise never be called directly
+	pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
+		log_trace!(self, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
+		let (session_priv, prng_seed) = self.keys_manager.get_onion_rand();
+
+		let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
+			.map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
+		let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height)?;
+		if onion_utils::route_size_insane(&onion_payloads) {
+			return Err(APIError::RouteError{err: "Route size too large considering onion data"});
+		}
+		let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
+
+		let _ = self.total_consistency_lock.read().unwrap();
+
+		let err: Result<(), _> = loop {
+			let mut channel_lock = self.channel_state.lock().unwrap();
+			let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
+				None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
+				Some(id) => id.clone(),
+			};
+
+			let channel_state = &mut *channel_lock;
+			if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+				match {
+					if chan.get().get_their_node_id() != path.first().unwrap().pubkey {
+						return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
+					}
+					if !chan.get().is_live() {
+						return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
+					}
+					break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+						path: path.clone(),
+						session_priv: session_priv.clone(),
+						first_hop_htlc_msat: htlc_msat,
+					}, onion_packet), channel_state, chan)
+				} {
+					Some((update_add, commitment_signed, monitor_update)) => {
+						if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+							maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
+							// Note that MonitorUpdateFailed here indicates (per function docs)
+							// that we will resend the commitment update once monitor updating
+							// is restored. Therefore, we must return an error indicating that
+							// it is unsafe to retry the payment wholesale, which we do in the
+							// send_payment check for MonitorUpdateFailed, below.
+							return Err(APIError::MonitorUpdateFailed);
+						}
+
+						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+							node_id: path.first().unwrap().pubkey,
+							updates: msgs::CommitmentUpdate {
+								update_add_htlcs: vec![update_add],
+								update_fulfill_htlcs: Vec::new(),
+								update_fail_htlcs: Vec::new(),
+								update_fail_malformed_htlcs: Vec::new(),
+								update_fee: None,
+								commitment_signed,
+							},
+						});
+					},
+					None => {},
+				}
+			} else { unreachable!(); }
+			return Ok(());
+		};
+
+		match handle_error!(self, err, path.first().unwrap().pubkey) {
+			Ok(_) => unreachable!(),
+			Err(e) => {
+				Err(APIError::ChannelUnavailable { err: e.err })
+			},
+		}
+	}
+
 	/// Sends a payment along a given route.
 	///
 	/// Value parameters are provided via the last hop in route, see documentation for RouteHop
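
The new send_payment_along_path keeps the file's usual error-funneling shape: success paths return from inside `let err = loop { ... }`, while failures break out of the loop and are translated into an APIError afterwards. A stripped-down sketch of just that control-flow pattern (the types and the check here are invented stand-ins, not the real macros):

    #[derive(Debug)]
    struct InternalErr(String);
    #[derive(Debug)]
    struct ApiErr(String);

    fn send_along_path(channel_ok: bool) -> Result<(), ApiErr> {
        // `loop` is used as a block we can break out of with an error value;
        // success paths return directly and never reach the code after the loop.
        let err: Result<(), InternalErr> = loop {
            if !channel_ok {
                break Err(InternalErr("channel unusable".to_owned()));
            }
            // ... build and commit the HTLC here ...
            return Ok(());
        };
        // Only error values reach this point; translate them for the caller.
        err.map_err(|e| ApiErr(e.0))
    }

    fn main() {
        assert!(send_along_path(true).is_ok());
        assert!(send_along_path(false).is_err());
    }
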
@@ -1294,89 +1372,8 @@ impl ChannelMan
 		let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
 		let mut results = Vec::new();
-		'path_loop: for path in route.paths.iter() {
-			macro_rules! check_res_push {
-				($res: expr) => { match $res {
-						Ok(r) => r,
-						Err(e) => {
-							results.push(Err(e));
-							continue 'path_loop;
-						},
-					}
-				}
-			}
-
-			log_trace!(self, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
-			let (session_priv, prng_seed) = self.keys_manager.get_onion_rand();
-
-			let onion_keys = check_res_push!(onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
-				.map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"}));
-			let (onion_payloads, htlc_msat, htlc_cltv) = check_res_push!(onion_utils::build_onion_payloads(&path, total_value, payment_secret, cur_height));
-			if onion_utils::route_size_insane(&onion_payloads) {
-				check_res_push!(Err(APIError::RouteError{err: "Route size too large considering onion data"}));
-			}
-			let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, &payment_hash);
-
-			let _ = self.total_consistency_lock.read().unwrap();
-
-			let err: Result<(), _> = loop {
-				let mut channel_lock = self.channel_state.lock().unwrap();
-				let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
-					None => check_res_push!(Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"})),
-					Some(id) => id.clone(),
-				};
-
-				let channel_state = &mut *channel_lock;
-				if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
-					match {
-						if chan.get().get_their_node_id() != path.first().unwrap().pubkey {
-							check_res_push!(Err(APIError::RouteError{err: "Node ID mismatch on first hop!"}));
-						}
-						if !chan.get().is_live() {
-							check_res_push!(Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"}));
-						}
-						break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
-							path: path.clone(),
-							session_priv: session_priv.clone(),
-							first_hop_htlc_msat: htlc_msat,
-						}, onion_packet), channel_state, chan)
-					} {
-						Some((update_add, commitment_signed, monitor_update)) => {
-							if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
-								maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
-								// Note that MonitorUpdateFailed here indicates (per function docs)
-								// that we will resend the commitment update once monitor updating
-								// is restored. Therefore, we must return an error indicating that
-								// it is unsafe to retry the payment wholesale, which we do in the
-								// next check for MonitorUpdateFailed, below.
-								check_res_push!(Err(APIError::MonitorUpdateFailed));
-							}
-
-							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-								node_id: path.first().unwrap().pubkey,
-								updates: msgs::CommitmentUpdate {
-									update_add_htlcs: vec![update_add],
-									update_fulfill_htlcs: Vec::new(),
-									update_fail_htlcs: Vec::new(),
-									update_fail_malformed_htlcs: Vec::new(),
-									update_fee: None,
-									commitment_signed,
-								},
-							});
-						},
-						None => {},
-					}
-				} else { unreachable!(); }
-				results.push(Ok(()));
-				continue 'path_loop;
-			};
-
-			match handle_error!(self, err, path.first().unwrap().pubkey) {
-				Ok(_) => unreachable!(),
-				Err(e) => {
-					check_res_push!(Err(APIError::ChannelUnavailable { err: e.err }));
-				},
-			}
+		for path in route.paths.iter() {
+			results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
+		}
 		let mut has_ok = false;
 		let mut has_err = false;
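
With the per-path helper factored out, send_payment just collects one Result per path and derives the overall outcome from whether any path succeeded and any failed (the has_ok / has_err flags above). A condensed sketch of that aggregation; the outcome enum is invented here purely for illustration, the diff itself only shows the flags:

    // Illustrative stand-in for the real error/return type.
    #[derive(Debug, PartialEq)]
    enum PaymentOutcome {
        AllPathsSucceeded,
        AllPathsFailed,
        PartialFailure, // some paths sent, some failed: the caller must handle a partially-sent MPP payment
    }

    fn aggregate(results: &[Result<(), String>]) -> PaymentOutcome {
        let has_ok = results.iter().any(|r| r.is_ok());
        let has_err = results.iter().any(|r| r.is_err());
        match (has_ok, has_err) {
            (true, false) => PaymentOutcome::AllPathsSucceeded,
            (false, _) => PaymentOutcome::AllPathsFailed,
            (true, true) => PaymentOutcome::PartialFailure,
        }
    }

    fn main() {
        assert_eq!(aggregate(&[Ok(()), Ok(())]), PaymentOutcome::AllPathsSucceeded);
        assert_eq!(aggregate(&[Ok(()), Err("no channel".into())]), PaymentOutcome::PartialFailure);
    }
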
@@ -1412,7 +1409,7 @@ impl ChannelMan
 	pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
 		let _ = self.total_consistency_lock.read().unwrap();
 
-		let (mut chan, msg, chan_monitor) = {
+		let (chan, msg) = {
 			let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
 				Some(mut chan) => {
 					(chan.get_outbound_funding_created(funding_txo)
@@ -1425,30 +1422,11 @@ impl ChannelMan
 			};
 			match handle_error!(self, res, chan.get_their_node_id()) {
 				Ok(funding_msg) => {
-					(chan, funding_msg.0, funding_msg.1)
+					(chan, funding_msg)
 				},
 				Err(_) => { return; }
 			}
 		};
-		// Because we have exclusive ownership of the channel here we can release the channel_state
-		// lock before add_monitor
-		if let Err(e) = self.monitor.add_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-			match e {
-				ChannelMonitorUpdateErr::PermanentFailure => {
-					match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(true), None)), chan.get_their_node_id()) {
-						Err(_) => { return; },
-						Ok(()) => unreachable!(),
-					}
-				},
-				ChannelMonitorUpdateErr::TemporaryFailure => {
-					// Its completely fine to continue with a FundingCreated until the monitor
-					// update is persisted, as long as we don't generate the FundingBroadcastSafe
-					// until the monitor has been safely persisted (as funding broadcast is not,
-					// in fact, safe).
-					chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
-				},
-			}
-		}
 
 		let mut channel_state = self.channel_state.lock().unwrap();
 		channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
@@ -1705,7 +1683,7 @@ impl ChannelMan
 					for forward_info in pending_forwards.drain(..) {
 						match forward_info {
 							HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
-									routing: PendingHTLCRouting::Receive { payment_data },
+									routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
 									incoming_shared_secret, payment_hash, amt_to_forward, .. }, } => {
 								let prev_hop = HTLCPreviousHopData {
 									short_channel_id: prev_short_channel_id,
@@ -1722,6 +1700,7 @@ impl ChannelMan
 									prev_hop,
 									value: amt_to_forward,
 									payment_data: payment_data.clone(),
+									cltv_expiry: incoming_cltv_expiry,
 								});
 								if let &Some(ref data) = &payment_data {
 									for htlc in htlcs.iter() {
@@ -1856,9 +1835,9 @@ impl ChannelMan
 				match &onion_error {
 					&HTLCFailReason::LightningError { ref err } => {
 #[cfg(test)]
-						let (channel_update, payment_retryable, onion_error_code) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+						let (channel_update, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 #[cfg(not(test))]
-						let (channel_update, payment_retryable, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+						let (channel_update, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
 						// TODO: If we decided to blame ourselves (or one of our channels) in
 						// process_onion_failure we should close that channel as it implies our
 						// next-hop is needlessly blaming us!
@@ -1874,13 +1853,17 @@ impl ChannelMan
 								payment_hash: payment_hash.clone(),
 								rejected_by_dest: !payment_retryable,
 #[cfg(test)]
-								error_code: onion_error_code
+								error_code: onion_error_code,
+#[cfg(test)]
+								error_data: onion_error_data
 							}
 						);
 					},
 					&HTLCFailReason::Reason {
 #[cfg(test)]
 							ref failure_code,
+#[cfg(test)]
+							ref data,
 							..
 						} => {
 						// we get a fail_malformed_htlc from the first hop
 						// TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary
@@ -1895,6 +1878,8 @@ impl ChannelMan
 								rejected_by_dest: path.len() == 1,
 #[cfg(test)]
 								error_code: Some(*failure_code),
+#[cfg(test)]
+								error_data: Some(data.clone()),
 							}
 						);
 					}
@@ -2287,7 +2272,7 @@ impl ChannelMan
 		};
 		// Because we have exclusive ownership of the channel here we can release the channel_state
 		// lock before add_monitor
-		if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
+		if let Err(e) = self.monitor.add_monitor(monitor_update.get_funding_txo(), monitor_update) {
 			match e {
 				ChannelMonitorUpdateErr::PermanentFailure => {
 					// Note that we reply with the new channel_id in error messages if we gave up on the
@@ -2331,17 +2316,11 @@ impl ChannelMan
 				if chan.get().get_their_node_id() != *their_node_id {
 					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
 				}
-				let monitor_update = match chan.get_mut().funding_signed(&msg) {
-					Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
-					Err((Some(monitor_update), e)) => {
-						assert!(chan.get().is_awaiting_monitor_update());
-						let _ = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update);
-						try_chan_entry!(self, Err(e), channel_state, chan);
-						unreachable!();
-					},
+				let monitor = match chan.get_mut().funding_signed(&msg) {
 					Ok(update) => update,
+					Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
 				};
-				if let Err(e) = self.monitor.update_monitor(chan.get().get_funding_txo().unwrap(), monitor_update) {
+				if let Err(e) = self.monitor.add_monitor(chan.get().get_funding_txo().unwrap(), monitor) {
 					return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
 				}
 				(chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
@@ -2977,29 +2956,39 @@ impl
+					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+						let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+						htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+							failure_code: 0x4000 | 15,
+							data: htlc_msat_height_data
+						}));
+						false
+					} else { true }
+				});
+				!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+			});
 		}
 		for failure in failed_channels.drain(..) {
 			self.finish_force_close_channel(failure);
 		}
+
+		for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
+			self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+		}
 		self.latest_block_height.store(height as usize, Ordering::Release);
 		*self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
 		loop {
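
The failure data attached to the 0x4000 | 15 code when an unclaimed HTLC times out is simply the HTLC amount followed by the current block height, both big-endian (the byte_utils::be64_to_array / be32_to_array calls above). A standalone equivalent using std helpers rather than the crate's byte_utils:

    /// Builds the 12-byte failure data used with failure code 0x4000 | 15:
    /// the HTLC amount in millisatoshis (8 bytes, big-endian) followed by the
    /// block height at which we gave up on it (4 bytes, big-endian).
    fn htlc_fail_data(htlc_msat: u64, height: u32) -> Vec<u8> {
        let mut data = htlc_msat.to_be_bytes().to_vec();
        data.extend_from_slice(&height.to_be_bytes());
        data
    }

    fn main() {
        let data = htlc_fail_data(100_000, 650_000);
        assert_eq!(data.len(), 12);
        assert_eq!(&data[..8], &100_000u64.to_be_bytes()[..]);
        assert_eq!(&data[8..], &650_000u32.to_be_bytes()[..]);
    }
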
@@ -3339,9 +3351,10 @@ impl Writeable for PendingHTLCInfo {
 				onion_packet.write(writer)?;
 				short_channel_id.write(writer)?;
 			},
-			&PendingHTLCRouting::Receive { ref payment_data } => {
+			&PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => {
 				1u8.write(writer)?;
 				payment_data.write(writer)?;
+				incoming_cltv_expiry.write(writer)?;
 			},
 		}
 		self.incoming_shared_secret.write(writer)?;
@@ -3362,6 +3375,7 @@ impl Readable for PendingHTLCInfo {
 				},
 				1u8 => PendingHTLCRouting::Receive {
 					payment_data: Readable::read(reader)?,
+					incoming_cltv_expiry: Readable::read(reader)?,
 				},
 				_ => return Err(DecodeError::InvalidValue),
 			},
@@ -3434,7 +3448,8 @@ impl_writeable!(HTLCPreviousHopData, 0, {
 impl_writeable!(ClaimableHTLC, 0, {
 	prev_hop,
 	value,
-	payment_data
+	payment_data,
+	cltv_expiry
 });
 
 impl Writeable for HTLCSource {
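
In all three serialization changes the new field is written immediately after the existing ones and read back in the same position, which is what keeps the round trip consistent. A minimal, purely illustrative round-trip of the Receive variant's new incoming_cltv_expiry with plain big-endian framing (this is not the crate's actual ser format):

    use std::io::{self, Read, Write};

    // Toy (de)serialization: payment_data is omitted here; the point is only that
    // the write order and the read order for the new field must match.
    fn write_receive<W: Write>(w: &mut W, incoming_cltv_expiry: u32) -> io::Result<()> {
        w.write_all(&[1u8])?; // variant tag, as in the Writeable impl above
        w.write_all(&incoming_cltv_expiry.to_be_bytes())
    }

    fn read_receive<R: Read>(r: &mut R) -> io::Result<u32> {
        let mut tag = [0u8; 1];
        r.read_exact(&mut tag)?;
        debug_assert_eq!(tag, [1u8]);
        let mut expiry = [0u8; 4];
        r.read_exact(&mut expiry)?;
        Ok(u32::from_be_bytes(expiry))
    }

    fn main() -> io::Result<()> {
        let mut buf = Vec::new();
        write_receive(&mut buf, 700_123)?;
        assert_eq!(read_receive(&mut &buf[..])?, 700_123);
        Ok(())
    }
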