X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=82b572cda84c77e5610ce4ad7a90dfb8a2d05de7;hb=852cf2b3f2e5145f8a28d5e30eb37c9d5568dcc1;hp=5e0b56d05ab51217c241a9a3d887adae481e5b16;hpb=2c8a26c6d23bb224d61b8fe605f961b3d08def87;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 5e0b56d0..82b572cd 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -234,6 +234,36 @@ impl Readable for InterceptId {
 		Ok(InterceptId(buf))
 	}
 }
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
+pub(crate) enum SentHTLCId {
+	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
+	OutboundRoute { session_priv: SecretKey },
+}
+impl SentHTLCId {
+	pub(crate) fn from_source(source: &HTLCSource) -> Self {
+		match source {
+			HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
+				short_channel_id: hop_data.short_channel_id,
+				htlc_id: hop_data.htlc_id,
+			},
+			HTLCSource::OutboundRoute { session_priv, .. } =>
+				Self::OutboundRoute { session_priv: *session_priv },
+		}
+	}
+}
+impl_writeable_tlv_based_enum!(SentHTLCId,
+	(0, PreviousHopData) => {
+		(0, short_channel_id, required),
+		(2, htlc_id, required),
+	},
+	(2, OutboundRoute) => {
+		(0, session_priv, required),
+	};
+);
+
+
 /// Tracks the inbound corresponding to an outbound HTLC
 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
 #[derive(Clone, PartialEq, Eq)]
@@ -3653,14 +3683,14 @@ where
 	/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
 	/// startup during which time claims that were in-progress at shutdown may be replayed.
 	pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
-		self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails);
+		self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
 	}
 
 	/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
 	/// reason for the failure.
 	///
 	/// See [`FailureCode`] for valid failure codes.
-	pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) {
+	pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 
 		let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
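The first hunk introduces `SentHTLCId`, a cheap, hashable identity for an HTLC: just the subset of [`HTLCSource`] fields that are guaranteed unique, so it can key maps where the full source (path, payment metadata, and so on) cannot. A minimal standalone sketch of the idea, with toy types and a `[u8; 32]` standing in for `SecretKey` (none of these names are rust-lightning's actual API):

use std::collections::HashMap;

// Toy stand-in for `HTLCSource`; the real enum carries extra fields
// (path, payment_id, ...) that are not part of an HTLC's identity.
enum HtlcSource {
    PreviousHopData { short_channel_id: u64, htlc_id: u64, amt_msat: u64 },
    OutboundRoute { session_priv: [u8; 32], first_hop_htlc_msat: u64 },
}

// Just the guaranteed-unique subset, so it can cheaply derive Eq + Hash.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum SentHtlcId {
    PreviousHopData { short_channel_id: u64, htlc_id: u64 },
    OutboundRoute { session_priv: [u8; 32] },
}

impl SentHtlcId {
    fn from_source(source: &HtlcSource) -> Self {
        match source {
            HtlcSource::PreviousHopData { short_channel_id, htlc_id, .. } =>
                SentHtlcId::PreviousHopData { short_channel_id: *short_channel_id, htlc_id: *htlc_id },
            HtlcSource::OutboundRoute { session_priv, .. } =>
                SentHtlcId::OutboundRoute { session_priv: *session_priv },
        }
    }
}

fn main() {
    let source = HtlcSource::PreviousHopData { short_channel_id: 42, htlc_id: 7, amt_msat: 1_000 };
    let mut htlc_state: HashMap<SentHtlcId, &'static str> = HashMap::new();
    htlc_state.insert(SentHtlcId::from_source(&source), "pending");
    assert_eq!(htlc_state.len(), 1);
}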
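The hunk above also moves `fail_htlc_backwards_with_reason` from `&FailureCode` to a by-value `FailureCode` (the matching `get_htlc_fail_reason_from_failure_code` change follows below). A fieldless `Copy` enum fits in a register, so passing it by value drops a pointless indirection and the `*failure_code` dereferences. A self-contained sketch of the pattern; the discriminants follow BOLT 4's NODE/PERM flag layout, but the enum shape here is illustrative rather than the library's exact definition:

// BOLT 4 failure-code bit flags.
const NODE: u16 = 0x2000;
const PERM: u16 = 0x4000;

// Fieldless enum with explicit discriminants: `Copy` and one u16 wide,
// so passing by value is at least as cheap as passing `&FailureCode`.
#[repr(u16)]
#[derive(Clone, Copy)]
enum FailureCode {
    TemporaryNodeFailure = NODE | 2,
    RequiredNodeFeatureMissing = PERM | NODE | 3,
    IncorrectOrUnknownPaymentDetails = PERM | 15,
}

fn failure_code_to_u16(code: FailureCode) -> u16 {
    // No dereference needed once the parameter is by-value.
    code as u16
}

fn main() {
    assert_eq!(failure_code_to_u16(FailureCode::TemporaryNodeFailure), 0x2002);
    assert_eq!(failure_code_to_u16(FailureCode::RequiredNodeFeatureMissing), 0x6003);
    assert_eq!(failure_code_to_u16(FailureCode::IncorrectOrUnknownPaymentDetails), 0x400f);
}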
@@ -3675,14 +3705,14 @@ where
 	}
 
 	/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
-	fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
+	fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
 		match failure_code {
-			FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16),
-			FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16),
+			FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16),
+			FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16),
 			FailureCode::IncorrectOrUnknownPaymentDetails => {
 				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
 				htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
-				HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data)
+				HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data)
 			}
 		}
 	}
@@ -6774,27 +6804,36 @@ impl Readable for HTLCSource {
 			0 => {
 				let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
 				let mut first_hop_htlc_msat: u64 = 0;
-				let mut path = Some(Vec::new());
+				let mut path: Option<Vec<RouteHop>> = Some(Vec::new());
 				let mut payment_id = None;
 				let mut payment_secret = None;
-				let mut payment_params = None;
+				let mut payment_params: Option<PaymentParameters> = None;
 				read_tlv_fields!(reader, {
 					(0, session_priv, required),
 					(1, payment_id, option),
 					(2, first_hop_htlc_msat, required),
 					(3, payment_secret, option),
 					(4, path, vec_type),
-					(5, payment_params, option),
+					(5, payment_params, (option: ReadableArgs, 0)),
 				});
 				if payment_id.is_none() {
 					// For backwards compat, if there was no payment_id written, use the session_priv bytes
 					// instead.
 					payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
 				}
+				if path.is_none() || path.as_ref().unwrap().is_empty() {
+					return Err(DecodeError::InvalidValue);
+				}
+				let path = path.unwrap();
+				if let Some(params) = payment_params.as_mut() {
+					if params.final_cltv_expiry_delta == 0 {
+						params.final_cltv_expiry_delta = path.last().unwrap().cltv_expiry_delta;
+					}
+				}
 				Ok(HTLCSource::OutboundRoute {
 					session_priv: session_priv.0.unwrap(),
 					first_hop_htlc_msat,
-					path: path.unwrap(),
+					path,
 					payment_id: payment_id.unwrap(),
 					payment_secret,
 					payment_params,
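The `Readable` hunk above hardens the legacy read path: an absent or empty `path` is now rejected, and a `PaymentParameters` deserialized with the default `final_cltv_expiry_delta` of 0 (older serializations predate the field) is backfilled from the path's final hop. A small sketch of that fixup using hypothetical stand-in types:

// Hypothetical stand-ins for the real `RouteHop` / `PaymentParameters`.
struct RouteHop { cltv_expiry_delta: u32 }
struct PaymentParameters { final_cltv_expiry_delta: u32 }

// Mirrors the read-path fixup: an empty path is rejected outright, and a
// zero (i.e. unset, legacy) final CLTV delta is recovered from the last
// hop, which carries the recipient's delta.
fn fixup_payment_params(
    path: &[RouteHop], params: &mut Option<PaymentParameters>,
) -> Result<(), &'static str> {
    if path.is_empty() { return Err("empty path is invalid"); }
    if let Some(params) = params.as_mut() {
        if params.final_cltv_expiry_delta == 0 {
            params.final_cltv_expiry_delta = path.last().unwrap().cltv_expiry_delta;
        }
    }
    Ok(())
}

fn main() {
    let path = vec![RouteHop { cltv_expiry_delta: 144 }, RouteHop { cltv_expiry_delta: 40 }];
    let mut params = Some(PaymentParameters { final_cltv_expiry_delta: 0 });
    fixup_payment_params(&path, &mut params).unwrap();
    assert_eq!(params.unwrap().final_cltv_expiry_delta, 40);
}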
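The hunk below locks every per-peer `Mutex` while already holding the owning `per_peer_state` write lock, which is why taking the inner locks in arbitrary iteration order cannot deadlock: every other thread must hold the `per_peer_state` read lock before it can reach any per-peer mutex, and the write lock excludes all readers. A sketch of that invariant with plain `std::sync` types, where an ordinary `lock()` stands in for the library's lockorder-checking helper:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerState { outbound_htlcs: u32 }

fn serialize_all(per_peer_state: &RwLock<HashMap<u8, Mutex<PeerState>>>) -> u32 {
    // Holding the owning map's *write* lock excludes every reader, and
    // readers are the only way to reach the inner mutexes. Hence locking
    // the inner mutexes here, in any order, cannot deadlock.
    let peers = per_peer_state.write().unwrap();
    let mut guards = Vec::new();
    for (_, peer_mutex) in peers.iter() {
        guards.push(peer_mutex.lock().unwrap());
    }
    guards.iter().map(|g| g.outbound_htlcs).sum()
}

fn main() {
    let map = RwLock::new(HashMap::from([
        (1u8, Mutex::new(PeerState { outbound_htlcs: 2 })),
        (2u8, Mutex::new(PeerState { outbound_htlcs: 3 })),
    ]));
    assert_eq!(serialize_all(&map), 5);
}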
@@ -6941,7 +6980,10 @@ where
 		let mut monitor_update_blocked_actions_per_peer = None;
 		let mut peer_states = Vec::new();
 		for (_, peer_state_mutex) in per_peer_state.iter() {
-			peer_states.push(peer_state_mutex.lock().unwrap());
+			// Because we're holding the owning `per_peer_state` write lock here there's no chance
+			// of a lockorder violation deadlock - no other thread can be holding any
+			// per_peer_state lock at all.
+			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
 		}
 
 		(serializable_peer_count).write(writer)?;
@@ -7433,6 +7475,10 @@ where
 			probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
 		}
 
+		if !channel_closures.is_empty() {
+			pending_events_read.append(&mut channel_closures);
+		}
+
 		if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
 			pending_outbound_payments = Some(pending_outbound_payments_compat);
 		} else if pending_outbound_payments.is_none() {
@@ -7441,7 +7487,13 @@ where
 				outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
 			}
 			pending_outbound_payments = Some(outbounds);
-		} else {
+		}
+		let pending_outbounds = OutboundPayments {
+			pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+			retry_lock: Mutex::new(())
+		};
+
+		{
 			// If we're tracking pending payments, ensure we haven't lost any by looking at the
 			// ChannelMonitor data for any channels for which we do not have authoritative state
 			// (i.e. those for which we just force-closed above or we otherwise don't have a
 			// channel present)
...
 			// 0.0.102+
 			for (_, monitor) in args.channel_monitors.iter() {
 				if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
-					for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
+					for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
 						if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
 							if path.is_empty() {
 								log_error!(args.logger, "Got an empty path for a pending payment");
 								return Err(DecodeError::InvalidValue);
 							}
+							let path_amt = path.last().unwrap().fee_msat;
 							let mut session_priv_bytes = [0; 32];
 							session_priv_bytes[..].copy_from_slice(&session_priv[..]);
-							match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
+							match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
 								hash_map::Entry::Occupied(mut entry) => {
 									let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
 									log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
@@ -7488,48 +7541,64 @@ where
 							}
 						}
 					}
-					for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
-						if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
-							let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
-								info.prev_funding_outpoint == prev_hop_data.outpoint &&
-								info.prev_htlc_id == prev_hop_data.htlc_id
-							};
-							// The ChannelMonitor is now responsible for this HTLC's
-							// failure/success and will let us know what its outcome is. If we
-							// still have an entry for this HTLC in `forward_htlcs` or
-							// `pending_intercepted_htlcs`, we were apparently not persisted after
-							// the monitor was when forwarding the payment.
-							forward_htlcs.retain(|_, forwards| {
-								forwards.retain(|forward| {
-									if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
-										if pending_forward_matches_htlc(&htlc_info) {
-											log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
-												log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
-											false
+					for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
+						match htlc_source {
+							HTLCSource::PreviousHopData(prev_hop_data) => {
+								let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+									info.prev_funding_outpoint == prev_hop_data.outpoint &&
+										info.prev_htlc_id == prev_hop_data.htlc_id
+								};
+								// The ChannelMonitor is now responsible for this HTLC's
+								// failure/success and will let us know what its outcome is. If we
+								// still have an entry for this HTLC in `forward_htlcs` or
+								// `pending_intercepted_htlcs`, we were apparently not persisted after
+								// the monitor was when forwarding the payment.
+								forward_htlcs.retain(|_, forwards| {
+									forwards.retain(|forward| {
+										if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+											if pending_forward_matches_htlc(&htlc_info) {
+												log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+													log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+												false
+											} else { true }
+										} else { true }
+									});
+									!forwards.is_empty()
+								});
+								pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+									if pending_forward_matches_htlc(&htlc_info) {
+										log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+											log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+										pending_events_read.retain(|event| {
+											if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+												intercepted_id != ev_id
+											} else { true }
+										});
+										false
+									} else { true }
+								});
-										} else { true }
-									} else { true }
-								});
-								!forwards.is_empty()
-							});
-							pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
-								if pending_forward_matches_htlc(&htlc_info) {
-									log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
-										log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
-									pending_events_read.retain(|event| {
-										if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
-											intercepted_id != ev_id
-										} else { true }
-									});
-									false
-								} else { true }
-							});
+							},
+							HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
+								if let Some(preimage) = preimage_opt {
+									let pending_events = Mutex::new(pending_events_read);
+									// Note that we set `from_onchain` to "false" here,
+									// deliberately keeping the pending payment around forever.
+									// Given it should only occur when we have a channel we're
+									// force-closing for being stale that's okay.
+									// The alternative would be to wipe the state when claiming,
+									// generating a `PaymentPathSuccessful` event but regenerating
+									// it and the `PaymentSent` on every restart until the
+									// `ChannelMonitor` is removed.
+									pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
+									pending_events_read = pending_events.into_inner().unwrap();
+								}
+							},
 						}
 					}
 				}
 			}
 		}
 
-		let pending_outbounds = OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), retry_lock: Mutex::new(()) };
 		if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
 			// If we have pending HTLCs to forward, assume we either dropped a
 			// `PendingHTLCsForwardable` or the user received it but never processed it as they
@@ -7587,10 +7656,6 @@ where
 		let mut secp_ctx = Secp256k1::new();
 		secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
 
-		if !channel_closures.is_empty() {
-			pending_events_read.append(&mut channel_closures);
-		}
-
 		let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
 			Ok(key) => key,
 			Err(()) => return Err(DecodeError::InvalidValue)
@@ -7992,7 +8057,6 @@ mod tests {
 		let route_params = RouteParameters {
 			payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
 			final_value_msat: 100_000,
-			final_cltv_expiry_delta: TEST_FINAL_CLTV,
 		};
 		let route = find_route(
 			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
@@ -8083,7 +8147,6 @@ mod tests {
 		let route_params = RouteParameters {
 			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
 			final_value_msat: 10_000,
-			final_cltv_expiry_delta: 40,
 		};
 		let network_graph = nodes[0].network_graph.clone();
 		let first_hops = nodes[0].node.list_usable_channels();
@@ -8126,7 +8189,6 @@ mod tests {
 		let route_params = RouteParameters {
 			payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
 			final_value_msat: 10_000,
-			final_cltv_expiry_delta: 40,
 		};
 		let network_graph = nodes[0].network_graph.clone();
 		let first_hops = nodes[0].node.list_usable_channels();
@@ -8280,10 +8342,10 @@ mod tests {
 			let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
 			assert_eq!(nodes_0_lock.len(), 1);
 			assert!(nodes_0_lock.contains_key(channel_id));
-
-			assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
 		}
 
+		assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
+
 		let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 		nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
 
 		{
 			let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
 			assert_eq!(nodes_0_lock.len(), 1);
 			assert!(nodes_0_lock.contains_key(channel_id));
+		}
 
+		{
 			// Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon
 			// as it has the funding transaction.
 			let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
@@ -8321,7 +8385,9 @@ mod tests {
 			let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
 			assert_eq!(nodes_0_lock.len(), 1);
 			assert!(nodes_0_lock.contains_key(channel_id));
+		}
 
+		{
 			// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
 			// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
 			// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
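Stepping back to the deserialization hunks: for channels without authoritative state (for example those force-closed during the read above), pending outbound payments are rebuilt from each `ChannelMonitor`'s in-flight HTLCs, keyed by `payment_id` with one entry per path's `session_priv`. A simplified sketch of that reconstruction, with hypothetical types in place of `PendingOutboundPayment`:

use std::collections::{HashMap, HashSet};

// Hypothetical mirror of the reload step: `session_priv` uniquely
// identifies one payment path, `payment_id` groups the paths of one payment.
type SessionPriv = [u8; 32];

fn reconstruct_pending(
    pending: &mut HashMap<u64, HashSet<SessionPriv>>,
    monitor_htlcs: &[(u64, SessionPriv)], // (payment_id, session_priv) per in-flight HTLC
) {
    for (payment_id, session_priv) in monitor_htlcs {
        // Matches the `hash_map::Entry` dance in the diff: re-add any path the
        // serialized `ChannelManager` no longer knows about, so restart can
        // still fail or claim it instead of silently forgetting the payment.
        pending.entry(*payment_id).or_default().insert(*session_priv);
    }
}

fn main() {
    let mut pending: HashMap<u64, HashSet<SessionPriv>> = HashMap::new();
    reconstruct_pending(&mut pending, &[(7, [0xab; 32])]);
    assert!(pending[&7].contains(&[0xab; 32]));
}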
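In the `OutboundRoute` arm of the same loop, an HTLC the monitor already resolved with a preimage is replayed through `claim_htlc` with `from_onchain` set to false, deliberately leaving the pending-payment entry in place so that replaying the same monitor data after another restart changes nothing. A toy sketch of that idempotent-replay shape; `PendingOutbounds` and its fields here are hypothetical stand-ins:

use std::collections::HashMap;

// Each monitor may report an outbound HTLC with the preimage that resolved it.
struct ResolvedHtlc { payment_id: u64, preimage: Option<[u8; 32]> }

#[derive(Default)]
struct PendingOutbounds { claimed: HashMap<u64, [u8; 32]> }

impl PendingOutbounds {
    // Analogue of `claim_htlc(..., from_onchain: false, ...)`: record the
    // claim but keep the entry around, so replaying the same data is a no-op.
    fn claim_htlc(&mut self, payment_id: u64, preimage: [u8; 32]) {
        self.claimed.entry(payment_id).or_insert(preimage);
    }
}

fn main() {
    let monitor_htlcs = vec![
        ResolvedHtlc { payment_id: 1, preimage: Some([0x11; 32]) },
        ResolvedHtlc { payment_id: 2, preimage: None }, // still pending
    ];
    let mut outbounds = PendingOutbounds::default();
    // Running the replay twice leaves the state unchanged the second time.
    for _ in 0..2 {
        for htlc in &monitor_htlcs {
            if let Some(preimage) = htlc.preimage {
                outbounds.claim_htlc(htlc.payment_id, preimage);
            }
        }
        assert_eq!(outbounds.claimed.len(), 1);
    }
}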
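Finally, the test hunks drop the `final_cltv_expiry_delta` field from `RouteParameters` literals: after this diff the delta lives only on the nested `PaymentParameters` (for example via `for_keysend`), leaving a single source of truth. A hedged before-and-after sketch of that kind of refactor, with simplified stand-in structs rather than the library's full definitions:

// Hypothetical shape of the change the tests reflect: the final CLTV delta
// used to be duplicated on `RouteParameters` and now lives only on the
// nested `PaymentParameters`, so the two values can no longer disagree.
struct PaymentParameters { final_cltv_expiry_delta: u32 }

struct RouteParameters {
    payment_params: PaymentParameters,
    final_value_msat: u64,
    // final_cltv_expiry_delta: u32,  // removed: read it via `payment_params`
}

impl RouteParameters {
    fn final_cltv_expiry_delta(&self) -> u32 {
        self.payment_params.final_cltv_expiry_delta
    }
}

fn main() {
    let params = RouteParameters {
        payment_params: PaymentParameters { final_cltv_expiry_delta: 40 },
        final_value_msat: 10_000,
    };
    assert_eq!(params.final_cltv_expiry_delta(), 40);
    let _ = params.final_value_msat;
}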