X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=2be585380058b32ee5115d9e9bfd0d8cc2eb7bc3;hb=2e90ca11ca2063f6984191f028d81af1ba53b1b2;hp=bceae0f8b349ae322d0b7162a8384927dbd94e4c;hpb=1d1323a3d084d4e4466f76f07dba34e8023d51a6;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index bceae0f8..2be58538 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -232,6 +232,36 @@ impl Readable for InterceptId {
 		Ok(InterceptId(buf))
 	}
 }
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
+pub(crate) enum SentHTLCId {
+	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
+	OutboundRoute { session_priv: SecretKey },
+}
+impl SentHTLCId {
+	pub(crate) fn from_source(source: &HTLCSource) -> Self {
+		match source {
+			HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
+				short_channel_id: hop_data.short_channel_id,
+				htlc_id: hop_data.htlc_id,
+			},
+			HTLCSource::OutboundRoute { session_priv, .. } =>
+				Self::OutboundRoute { session_priv: *session_priv },
+		}
+	}
+}
+impl_writeable_tlv_based_enum!(SentHTLCId,
+	(0, PreviousHopData) => {
+		(0, short_channel_id, required),
+		(2, htlc_id, required),
+	},
+	(2, OutboundRoute) => {
+		(0, session_priv, required),
+	};
+);
+
+
 /// Tracks the inbound corresponding to an outbound HTLC
 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
 #[derive(Clone, PartialEq, Eq)]
@@ -3649,14 +3679,14 @@ where
 	/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
 	/// startup during which time claims that were in-progress at shutdown may be replayed.
 	pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
-		self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails);
+		self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
 	}
 
 	/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
 	/// reason for the failure.
 	///
 	/// See [`FailureCode`] for valid failure codes.
-	pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) {
+	pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
 		let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
@@ -3671,14 +3701,14 @@ where
 	}
 
 	/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
-	fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
+	fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
 		match failure_code {
-			FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(*failure_code as u16),
-			FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(*failure_code as u16),
+			FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16),
+			FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16),
 			FailureCode::IncorrectOrUnknownPaymentDetails => {
 				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
 				htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
-				HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data)
+				HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data)
 			}
 		}
 	}
@@ -7441,6 +7471,10 @@ where
 			probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
 		}
 
+		if !channel_closures.is_empty() {
+			pending_events_read.append(&mut channel_closures);
+		}
+
 		if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
 			pending_outbound_payments = Some(pending_outbound_payments_compat);
 		} else if pending_outbound_payments.is_none() {
@@ -7449,7 +7483,13 @@ where
 				outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
 			}
 			pending_outbound_payments = Some(outbounds);
-		} else {
+		}
+		let pending_outbounds = OutboundPayments {
+			pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
+			retry_lock: Mutex::new(())
+		};
+
+		{
 			// If we're tracking pending payments, ensure we haven't lost any by looking at the
 			// ChannelMonitor data for any channels for which we do not have authorative state
 			// (i.e. those for which we just force-closed above or we otherwise don't have a
@@ -7460,16 +7500,17 @@ where
 			// 0.0.102+
 			for (_, monitor) in args.channel_monitors.iter() {
 				if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
-					for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
+					for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
 						if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, ..
 							} = htlc_source {
 							if path.is_empty() {
 								log_error!(args.logger, "Got an empty path for a pending payment");
 								return Err(DecodeError::InvalidValue);
 							}
+							let path_amt = path.last().unwrap().fee_msat;
 							let mut session_priv_bytes = [0; 32];
 							session_priv_bytes[..].copy_from_slice(&session_priv[..]);
-							match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
+							match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
 								hash_map::Entry::Occupied(mut entry) => {
 									let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
 									log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
@@ -7496,51 +7537,64 @@ where
 							}
 						}
 					}
-					for (htlc_source, htlc) in monitor.get_all_current_outbound_htlcs() {
-						if let HTLCSource::PreviousHopData(prev_hop_data) = htlc_source {
-							let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
-								info.prev_funding_outpoint == prev_hop_data.outpoint &&
-									info.prev_htlc_id == prev_hop_data.htlc_id
-							};
-							// The ChannelMonitor is now responsible for this HTLC's
-							// failure/success and will let us know what its outcome is. If we
-							// still have an entry for this HTLC in `forward_htlcs` or
-							// `pending_intercepted_htlcs`, we were apparently not persisted after
-							// the monitor was when forwarding the payment.
-							forward_htlcs.retain(|_, forwards| {
-								forwards.retain(|forward| {
-									if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
-										if pending_forward_matches_htlc(&htlc_info) {
-											log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
-												log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
-											false
+					for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
+						match htlc_source {
+							HTLCSource::PreviousHopData(prev_hop_data) => {
+								let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
+									info.prev_funding_outpoint == prev_hop_data.outpoint &&
+										info.prev_htlc_id == prev_hop_data.htlc_id
+								};
+								// The ChannelMonitor is now responsible for this HTLC's
+								// failure/success and will let us know what its outcome is. If we
+								// still have an entry for this HTLC in `forward_htlcs` or
+								// `pending_intercepted_htlcs`, we were apparently not persisted after
+								// the monitor was when forwarding the payment.
+								forward_htlcs.retain(|_, forwards| {
+									forwards.retain(|forward| {
+										if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+											if pending_forward_matches_htlc(&htlc_info) {
+												log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
													log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+												false
+											} else { true }
 										} else { true }
+									});
+									!forwards.is_empty()
+								});
+								pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
+									if pending_forward_matches_htlc(&htlc_info) {
+										log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
											log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
+										pending_events_read.retain(|event| {
+											if let Event::HTLCIntercepted { intercept_id: ev_id, ..
												} = event {
 												intercepted_id != ev_id
 											} else { true }
 										});
 										false
 									} else { true }
 								});
-					!forwards.is_empty()
-					});
-					pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
-						if pending_forward_matches_htlc(&htlc_info) {
-							log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
-								log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
-							pending_events_read.retain(|event| {
-								if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
-									intercepted_id != ev_id
-								} else { true }
-							});
-							false
-						} else { true }
-					});
+							},
+							HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
+								if let Some(preimage) = preimage_opt {
+									let pending_events = Mutex::new(pending_events_read);
+									// Note that we set `from_onchain` to "false" here,
+									// deliberately keeping the pending payment around forever.
+									// Given it should only occur when we have a channel we're
+									// force-closing for being stale that's okay.
+									// The alternative would be to wipe the state when claiming,
+									// generating a `PaymentPathSuccessful` event but regenerating
+									// it and the `PaymentSent` on every restart until the
+									// `ChannelMonitor` is removed.
+									pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
+									pending_events_read = pending_events.into_inner().unwrap();
+								}
+							},
 						}
 					}
 				}
 			}
 		}
-		let pending_outbounds = OutboundPayments {
-			pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
-			retry_lock: Mutex::new(())
-		};
 		if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
 			// If we have pending HTLCs to forward, assume we either dropped a
 			// `PendingHTLCsForwardable` or the user received it but never processed it as they
@@ -7598,10 +7652,6 @@ where
 		let mut secp_ctx = Secp256k1::new();
 		secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
-		if !channel_closures.is_empty() {
-			pending_events_read.append(&mut channel_closures);
-		}
-
 		let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
 			Ok(key) => key,
 			Err(()) => return Err(DecodeError::InvalidValue)
 		};
@@ -8465,7 +8515,7 @@ mod tests {
 		// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
 		open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
 		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
-		assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 			open_channel_msg.temporary_channel_id);
 
 		// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
@@ -8512,7 +8562,7 @@ mod tests {
 			open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
 		}
 		nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
-		assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
 			open_channel_msg.temporary_channel_id);
 
 		// Of course, however, outbound channels are always allowed
@@ -8554,7 +8604,7 @@ mod tests {
 		// Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
 		// rejected.
 		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
-		assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
 			open_channel_msg.temporary_channel_id);
 
 		// but we can still open an outbound channel.
@@ -8563,7 +8613,7 @@ mod tests {
 
 		// but even with such an outbound channel, additional inbound channels will still fail.
 		nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
-		assert_eq!(get_err_msg!(nodes[1], nodes[0].node.get_our_node_id()).channel_id,
+		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
			open_channel_msg.temporary_channel_id);
 	}
 
@@ -8619,7 +8669,7 @@ mod tests {
 			}
 			_ => panic!("Unexpected event"),
 		}
-		assert_eq!(get_err_msg!(nodes[1], last_random_pk).channel_id,
+		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
 			open_channel_msg.temporary_channel_id);
 
 		// ...however if we accept the same channel 0conf it should work just fine.
@@ -8661,7 +8711,7 @@ mod tests {
 			_ => panic!("Unexpected event"),
 		}
 
-		let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+		let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
 		nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
 
 		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -8827,7 +8877,7 @@ pub mod bench {
 				let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
 				$node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
 				$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
-				let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
+				let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
 				$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
 				$node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
 				$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
@@ -8846,7 +8896,7 @@ pub mod bench {
 					_ => panic!("Failed to generate claim event"),
 				}
 
-				let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
+				let (raa, cs) = do_get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
 				$node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
 				$node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
 				$node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
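
Note (editorial, not part of the patch): the user-facing change in the hunks at old lines 3649 and 3671 is that `fail_htlc_backwards_with_reason` and the internal `get_htlc_fail_reason_from_failure_code` now take `FailureCode` by value instead of by reference, which also drops the `*failure_code` dereferences. The following is a minimal call-site sketch, assuming `FailureCode` is in scope from `lightning::ln::channelmanager` and that an initialized `ChannelManager` (`channel_manager`) and the `PaymentHash` of a claimable payment (`payment_hash`) already exist in the caller's context:

	// Illustrative fragment only. `FailureCode` is a small fieldless enum of
	// HTLC failure reasons; after this patch it is passed by value.
	use lightning::ln::channelmanager::FailureCode;

	// Reject the payment, reporting incorrect-or-unknown payment details back to
	// the sender. Before this patch the second argument was `&FailureCode::...`.
	channel_manager.fail_htlc_backwards_with_reason(
		&payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);

	// Equivalently, the convenience wrapper keeps its old signature and, as the
	// hunk at old line 3649 shows, now forwards the same failure code by value.
	channel_manager.fail_htlc_backwards(&payment_hash);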