X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=da6f6d958b145f5b1cb13bc85dfc1c66c12bfa53;hb=40f48def109c61d6b569e03113820ee042837d1b;hp=444d443facf98dbb2915172a33c754ea42abb0fe;hpb=0ec13f611bbbee831e37416f052c97d924b23ec0;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 444d443fa..da6f6d958 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -173,6 +173,7 @@ struct ClaimableHTLC {
 }
 
 /// A payment identifier used to uniquely identify a payment to LDK.
+/// (C-not exported) as we just use [u8; 32] directly
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 pub struct PaymentId(pub [u8; 32]);
 
@@ -945,7 +946,16 @@ pub enum PaymentSendFailure {
 	/// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
 	/// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel
 	/// with the latest update_id.
-	PartialFailure(Vec<Result<(), APIError>>),
+	PartialFailure {
+		/// The errors themselves, in the same order as the route hops.
+		results: Vec<Result<(), APIError>>,
+		/// If some paths failed without irrevocably committing to the new HTLC(s), this will
+		/// contain a [`RouteParameters`] object which can be used to calculate a new route that
+		/// will pay all remaining unpaid balance.
+		failed_paths_retry: Option<RouteParameters>,
+		/// The payment id for the payment, which is now at least partially pending.
+		payment_id: PaymentId,
+	},
 }
 
 macro_rules! handle_error {
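
The reworked `PartialFailure` variant tells a sender exactly what remains unpaid. To illustrate how the new fields are meant to be consumed, here is a self-contained sketch using simplified stand-in types rather than the real LDK `PaymentSendFailure`/`RouteParameters` definitions; in real code the caller would feed `failed_paths_retry` back into the router and re-send under the same `payment_id`.

    // Stand-in shapes for illustration only; not the actual LDK definitions.
    struct RouteParameters { final_value_msat: u64, final_cltv_expiry_delta: u32 }
    struct PaymentId(pub [u8; 32]);

    enum PaymentSendFailure {
        AllFailedRetrySafe,
        PartialFailure {
            results: Vec<Result<(), ()>>,
            failed_paths_retry: Option<RouteParameters>,
            payment_id: PaymentId,
        },
    }

    fn handle_send_result(res: Result<PaymentId, PaymentSendFailure>) {
        match res {
            Ok(_id) => println!("every path was sent"),
            Err(PaymentSendFailure::PartialFailure { results, failed_paths_retry, .. }) => {
                // Some HTLCs are already irrevocably pending; only the unsent remainder
                // may be retried, and only if a retry object was handed back.
                let failed = results.iter().filter(|r| r.is_err()).count();
                if let Some(retry) = failed_paths_retry {
                    // In real code: route `retry.final_value_msat` msat with at least
                    // `retry.final_cltv_expiry_delta`, then retry under the same payment id.
                    println!("retry {} failed path(s) for {} msat (cltv delta {})",
                        failed, retry.final_value_msat, retry.final_cltv_expiry_delta);
                }
            },
            Err(PaymentSendFailure::AllFailedRetrySafe) => {
                println!("nothing is pending; the whole payment can be retried");
            },
        }
    }

    fn main() {
        handle_send_result(Err(PaymentSendFailure::PartialFailure {
            results: vec![Ok(()), Err(())],
            failed_paths_retry: Some(RouteParameters { final_value_msat: 20_000, final_cltv_expiry_delta: 40 }),
            payment_id: PaymentId([0; 32]),
        }));
    }
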
@@ -1131,7 +1141,7 @@ macro_rules! handle_monitor_err {
 			res
 		} };
 	($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new());
+		handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new())
 	}
 }
 
@@ -1388,7 +1398,8 @@ impl ChannelMana
 				let peer_state = peer_state.lock().unwrap();
 				let their_features = &peer_state.latest_features;
 				let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
-				Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features, channel_value_satoshis, push_msat, user_channel_id, config)?
+				Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, their_features,
+					channel_value_satoshis, push_msat, user_channel_id, config, self.best_block.read().unwrap().height())?
 			},
 			None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
 		}
@@ -1949,17 +1960,24 @@ impl ChannelMana
 						break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update_for_unicast(chan).unwrap())));
 					}
 					let cur_height = self.best_block.read().unwrap().height() + 1;
-					// Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
-					// packet sanitization (see HTLC_FAIL_BACK_BUFFER rational)
+					// Theoretically, channel counterparty shouldn't send us a HTLC expiring now,
+					// but we want to be robust wrt to counterparty packet sanitization (see
+					// HTLC_FAIL_BACK_BUFFER rationale).
 					if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
 						break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
 					}
 					if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
 						break Some(("CLTV expiry is too far in the future", 21, None));
 					}
-					// In theory, we would be safe against unintentional channel-closure, if we only required a margin of LATENCY_GRACE_PERIOD_BLOCKS.
-					// But, to be safe against policy reception, we use a longer delay.
-					if (*outgoing_cltv_value) as u64 <= (cur_height + HTLC_FAIL_BACK_BUFFER) as u64 {
+					// If the HTLC expires ~now, don't bother trying to forward it to our
+					// counterparty. They should fail it anyway, but we don't want to bother with
+					// the round-trips or risk them deciding they definitely want the HTLC and
+					// force-closing to ensure they get it if we're offline.
+					// We previously had a much more aggressive check here which tried to ensure
+					// our counterparty receives an HTLC which has *our* risk threshold met on it,
+					// but there is no need to do that, and since we're a bit conservative with our
+					// risk threshold it just results in failing to forward payments.
+					if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
 						break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, Some(self.get_channel_update_for_unicast(chan).unwrap())));
 					}
 
@@ -2239,7 +2257,9 @@ impl ChannelMana
 		}
 		let mut has_ok = false;
 		let mut has_err = false;
-		for res in results.iter() {
+		let mut pending_amt_unsent = 0;
+		let mut max_unsent_cltv_delta = 0;
+		for (res, path) in results.iter().zip(route.paths.iter()) {
 			if res.is_ok() { has_ok = true; }
 			if res.is_err() { has_err = true; }
 			if let &Err(APIError::MonitorUpdateFailed) = res {
@@ -2247,11 +2267,25 @@ impl ChannelMana
 				// PartialFailure.
 				has_err = true;
 				has_ok = true;
-				break;
+			} else if res.is_err() {
+				pending_amt_unsent += path.last().unwrap().fee_msat;
+				max_unsent_cltv_delta = cmp::max(max_unsent_cltv_delta, path.last().unwrap().cltv_expiry_delta);
 			}
 		}
 		if has_err && has_ok {
-			Err(PaymentSendFailure::PartialFailure(results))
+			Err(PaymentSendFailure::PartialFailure {
+				results,
+				payment_id,
+				failed_paths_retry: if pending_amt_unsent != 0 {
+					if let Some(payee) = &route.payee {
+						Some(RouteParameters {
+							payee: payee.clone(),
+							final_value_msat: pending_amt_unsent,
+							final_cltv_expiry_delta: max_unsent_cltv_delta,
+						})
+					} else { None }
+				} else { None },
+			})
 		} else if has_err {
 			// If we failed to send any paths, we shouldn't have inserted the new PaymentId into
 			// our `pending_outbound_payments` map at all.
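
The loop above fixes what a retry has to cover: the unsent amount is the sum of the failed paths' last-hop values (the last hop's `fee_msat` is the amount delivered to the recipient), and the retry's final CLTV delta is the largest last-hop delta among the failed paths; paths that failed with `MonitorUpdateFailed` are deliberately excluded since their HTLCs may still go out. A small self-contained check of that arithmetic, using plain structs instead of LDK's `RouteHop` and ignoring the `MonitorUpdateFailed` case:

    use std::cmp;

    // Toy stand-in for the final hop of a payment path.
    struct LastHop { fee_msat: u64, cltv_expiry_delta: u32 }

    fn retry_params(results: &[Result<(), ()>], last_hops: &[LastHop]) -> (u64, u32) {
        let mut pending_amt_unsent = 0;
        let mut max_unsent_cltv_delta = 0;
        for (res, hop) in results.iter().zip(last_hops.iter()) {
            if res.is_err() {
                pending_amt_unsent += hop.fee_msat;
                max_unsent_cltv_delta = cmp::max(max_unsent_cltv_delta, hop.cltv_expiry_delta);
            }
        }
        (pending_amt_unsent, max_unsent_cltv_delta)
    }

    fn main() {
        // Three paths: 50k msat went out, a 30k and a 20k msat path failed to send.
        let results = [Ok(()), Err(()), Err(())];
        let last_hops = [
            LastHop { fee_msat: 50_000, cltv_expiry_delta: 40 },
            LastHop { fee_msat: 30_000, cltv_expiry_delta: 144 },
            LastHop { fee_msat: 20_000, cltv_expiry_delta: 40 },
        ];
        let (amt, cltv) = retry_params(&results, &last_hops);
        assert_eq!(amt, 50_000); // only the failed 30k + 20k must be re-sent
        assert_eq!(cltv, 144);   // the retry honours the strictest failed path
        println!("failed_paths_retry would ask for {} msat, final_cltv_expiry_delta {}", amt, cltv);
    }
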
@@ -2614,7 +2648,7 @@ impl ChannelMana
 							htlc_id: prev_htlc_id,
 							incoming_packet_shared_secret: incoming_shared_secret,
 						});
-						match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet) {
+						match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
 							Err(e) => {
 								if let ChannelError::Ignore(msg) = e {
 									log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
@@ -3612,7 +3646,12 @@ impl ChannelMana
 			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
 		}
 
-		let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration)
+		if !self.default_configuration.accept_inbound_channels {
+			return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
+		}
+
+		let channel = Channel::new_from_req(&self.fee_estimator, &self.keys_manager, counterparty_node_id.clone(),
+				&their_features, msg, 0, &self.default_configuration, self.best_block.read().unwrap().height())
 			.map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = &mut *channel_state_lock;
@@ -4817,7 +4856,7 @@ where
 	/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
 	/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
 	/// the function.
-	fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
+	fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), ClosureReason>>
 			(&self, height_opt: Option<u32>, f: FN) {
 		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 		// during initialization prior to the chain_monitor being fully configured in some cases.
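
The new `accept_inbound_channels` check gives node operators an outbound-only mode: any incoming `open_channel` is answered with the "No inbound channels accepted" error before a `Channel` is even constructed. A minimal sketch of flipping that knob, assuming the flag sits directly on `UserConfig` as the `self.default_configuration.accept_inbound_channels` access above suggests:

    use lightning::util::config::UserConfig;

    // Start from the defaults and only refuse inbound channels; pass this config
    // to the ChannelManager constructor (or as a per-channel override) when
    // setting up the node.
    fn outbound_only_config() -> UserConfig {
        let mut config = UserConfig::default();
        config.accept_inbound_channels = false;
        config
    }
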
@@ -4862,7 +4901,7 @@ where
 							}
 							short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
 						}
-					} else if let Err(e) = res {
+					} else if let Err(reason) = res {
 						if let Some(short_id) = channel.get_short_channel_id() {
 							short_to_id.remove(&short_id);
 						}
@@ -4874,10 +4913,14 @@ where
 								msg: update
 							});
 						}
-						self.issue_channel_close_events(channel, ClosureReason::CommitmentTxConfirmed);
+						let reason_message = format!("{}", reason);
+						self.issue_channel_close_events(channel, reason);
 						pending_msg_events.push(events::MessageSendEvent::HandleError {
 							node_id: channel.get_counterparty_node_id(),
-							action: msgs::ErrorAction::SendErrorMessage { msg: e },
+							action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
+								channel_id: channel.channel_id(),
+								data: reason_message,
+							} },
 						});
 						return false;
 					}
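
Instead of always reporting `CommitmentTxConfirmed`, the close path now renders the actual `ClosureReason` once and reuses it both for the channel-closed event and as the text of the error message sent to the peer. A self-contained sketch of that conversion with stand-in types (the real `ClosureReason` lives in `util::events` and implements `Display`; `msgs::ErrorMessage` is the wire type):

    use std::fmt;

    // Stand-ins for illustration; variants and Display text are abbreviated.
    enum ClosureReason { CommitmentTxConfirmed, ProcessingError { err: String } }

    impl fmt::Display for ClosureReason {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            match self {
                ClosureReason::CommitmentTxConfirmed => write!(f, "commitment or closing transaction was confirmed on chain."),
                ClosureReason::ProcessingError { err } => write!(f, "A processing error occurred: {}", err),
            }
        }
    }

    struct ErrorMessage { channel_id: [u8; 32], data: String }

    fn error_to_send(channel_id: [u8; 32], reason: &ClosureReason) -> ErrorMessage {
        // Mirrors the pattern above: format the reason once, send it as the error data.
        ErrorMessage { channel_id, data: format!("{}", reason) }
    }

    fn main() {
        let msg = error_to_send([0; 32], &ClosureReason::CommitmentTxConfirmed);
        println!("error on channel {:02x?}...: {}", &msg.channel_id[..2], msg.data);
    }
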
@@ -5796,7 +5839,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 		let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
 		let mut channel_closures = Vec::new();
 		for _ in 0..channel_count {
-			let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
+			let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
 			let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
 			funding_txo_set.insert(funding_txo.clone());
 			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -6068,9 +6111,9 @@ mod tests {
 	use ln::msgs;
 	use ln::msgs::ChannelMessageHandler;
 	use routing::router::{Payee, RouteParameters, find_route};
-	use routing::scorer::Scorer;
 	use util::errors::APIError;
 	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+	use util::test_utils;
 
 	#[cfg(feature = "std")]
 	#[test]
@@ -6306,7 +6349,7 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
-		let scorer = Scorer::new(0);
+		let scorer = test_utils::TestScorer::with_fixed_penalty(0);
 
 		// To start (1), send a regular payment but don't claim it.
 		let expected_route = [&nodes[1]];
@@ -6319,8 +6362,8 @@ mod tests {
 			final_cltv_expiry_delta: TEST_FINAL_CLTV,
 		};
 		let route = find_route(
-			&nodes[0].node.get_our_node_id(), &params,
-			&nodes[0].net_graph_msg_handler.network_graph, None, nodes[0].logger, &scorer
+			&nodes[0].node.get_our_node_id(), &params, nodes[0].network_graph, None,
+			nodes[0].logger, &scorer
 		).unwrap();
 		nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 		check_added_monitors!(nodes[0], 1);
@@ -6350,8 +6393,8 @@ mod tests {
 		// To start (2), send a keysend payment but don't claim it.
 		let payment_preimage = PaymentPreimage([42; 32]);
 		let route = find_route(
-			&nodes[0].node.get_our_node_id(), &params,
-			&nodes[0].net_graph_msg_handler.network_graph, None, nodes[0].logger, &scorer
+			&nodes[0].node.get_our_node_id(), &params, nodes[0].network_graph, None,
+			nodes[0].logger, &scorer
 		).unwrap();
 		let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
 		check_added_monitors!(nodes[0], 1);
@@ -6409,9 +6452,9 @@ mod tests {
 			final_value_msat: 10000,
 			final_cltv_expiry_delta: 40,
 		};
-		let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+		let network_graph = nodes[0].network_graph;
 		let first_hops = nodes[0].node.list_usable_channels();
-		let scorer = Scorer::new(0);
+		let scorer = test_utils::TestScorer::with_fixed_penalty(0);
 		let route = find_route(
 			&payer_pubkey, &params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
 			nodes[0].logger, &scorer
@@ -6452,9 +6495,9 @@ mod tests {
 			final_value_msat: 10000,
 			final_cltv_expiry_delta: 40,
 		};
-		let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
+		let network_graph = nodes[0].network_graph;
 		let first_hops = nodes[0].node.list_usable_channels();
-		let scorer = Scorer::new(0);
+		let scorer = test_utils::TestScorer::with_fixed_penalty(0);
 		let route = find_route(
 			&payer_pubkey, &params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
 			nodes[0].logger, &scorer
@@ -6519,7 +6562,7 @@ pub mod bench {
 	use ln::msgs::{ChannelMessageHandler, Init};
 	use routing::network_graph::NetworkGraph;
 	use routing::router::{Payee, get_route};
-	use routing::scorer::Scorer;
+	use routing::scoring::Scorer;
 	use util::test_utils;
 	use util::config::UserConfig;
 	use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose};
@@ -6627,9 +6670,9 @@ pub mod bench {
 	macro_rules! send_payment {
 		($node_a: expr, $node_b: expr) => {
 			let usable_channels = $node_a.list_usable_channels();
-			let payee = Payee::new($node_b.get_our_node_id())
+			let payee = Payee::from_node_id($node_b.get_our_node_id())
 				.with_features(InvoiceFeatures::known());
-			let scorer = Scorer::new(0);
+			let scorer = Scorer::with_fixed_penalty(0);
 			let route = get_route(&$node_a.get_our_node_id(), &payee, &dummy_graph,
 				Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), 10_000,
 				TEST_FINAL_CLTV, &logger_a, &scorer).unwrap();
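
Throughout the tests and benches the scorer is now built with `with_fixed_penalty(0)`: a scorer that applies one fixed penalty per channel, and with a penalty of 0 it never influences route selection, so these tests exercise routing purely on fees and CLTV. A self-contained sketch of that idea with a stand-in `Score` trait (the real trait in `routing::scoring` takes more routing context than just the short channel id):

    // Stand-in trait; LDK's real `Score` trait has a richer signature.
    trait Score {
        fn channel_penalty_msat(&self, short_channel_id: u64) -> u64;
    }

    // Applies the same penalty to every channel, which is what the tests above rely
    // on when they ask for `with_fixed_penalty(0)`.
    struct FixedPenaltyScorer { penalty_msat: u64 }

    impl FixedPenaltyScorer {
        fn with_fixed_penalty(penalty_msat: u64) -> Self { Self { penalty_msat } }
    }

    impl Score for FixedPenaltyScorer {
        fn channel_penalty_msat(&self, _short_channel_id: u64) -> u64 { self.penalty_msat }
    }

    fn main() {
        let scorer = FixedPenaltyScorer::with_fixed_penalty(0);
        // A zero penalty never changes the relative cost of candidate paths.
        assert_eq!(scorer.channel_penalty_msat(42), 0);
    }
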