X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=eb13f6b4aa1901620596a49af90abd3efbdf3603;hb=cf64e3fba52d7b7f6abce352948d78b97bcb9700;hp=15f536789bf43713279e5e231e415d26bf5b1129;hpb=b14927968fdce89f4640172f5d3d1c589043c481;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 15f53678..eb13f6b4 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -181,6 +181,7 @@ pub(super) enum HTLCForwardInfo {
 pub(crate) struct HTLCPreviousHopData {
 	// Note that this may be an outbound SCID alias for the associated channel.
 	short_channel_id: u64,
+	user_channel_id: Option<u128>,
 	htlc_id: u64,
 	incoming_packet_shared_secret: [u8; 32],
 	phantom_shared_secret: Option<[u8; 32]>,
@@ -221,6 +222,17 @@ struct ClaimableHTLC {
 	counterparty_skimmed_fee_msat: Option<u64>,
 }
 
+impl From<&ClaimableHTLC> for events::ClaimedHTLC {
+	fn from(val: &ClaimableHTLC) -> Self {
+		events::ClaimedHTLC {
+			channel_id: val.prev_hop.outpoint.to_channel_id(),
+			user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
+			cltv_expiry: val.cltv_expiry,
+			value_msat: val.value,
+		}
+	}
+}
+
 /// A payment identifier used to uniquely identify a payment to LDK.
 ///
 /// This is not exported to bindings users as we just use [u8; 32] directly
@@ -496,11 +508,15 @@ struct ClaimingPayment {
 	amount_msat: u64,
 	payment_purpose: events::PaymentPurpose,
 	receiver_node_id: PublicKey,
+	htlcs: Vec<events::ClaimedHTLC>,
+	sender_intended_value: Option<u64>,
 }
 impl_writeable_tlv_based!(ClaimingPayment, {
 	(0, amount_msat, required),
 	(2, payment_purpose, required),
 	(4, receiver_node_id, required),
+	(5, htlcs, optional_vec),
+	(7, sender_intended_value, option),
 });
 
 struct ClaimablePayment {
@@ -1144,7 +1160,11 @@ where
 	/// could be in the middle of being processed without the direct mutex held.
 	///
 	/// See `ChannelManager` struct-level documentation for lock order requirements.
+	#[cfg(not(any(test, feature = "_test_utils")))]
 	pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+
 	/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
 	pending_events_processor: AtomicBool,
 
@@ -2894,9 +2914,9 @@ where
 					short_channel_id, amt_to_forward, outgoing_cltv_value
 				}, ..
 			} => {
-				let next_pk = onion_utils::next_hop_packet_pubkey(&self.secp_ctx,
+				let next_packet_pk = onion_utils::next_hop_pubkey(&self.secp_ctx,
 					msg.onion_routing_packet.public_key.unwrap(), &shared_secret);
-				(short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_pk))
+				(short_channel_id, amt_to_forward, outgoing_cltv_value, Some(next_packet_pk))
 			},
 			// We'll do receive checks in [`Self::construct_pending_htlc_info`] so we have access to the
 			// inbound channel's state.
@@ -3781,6 +3801,7 @@ where
 				if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
 					let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 						short_channel_id: payment.prev_short_channel_id,
+						user_channel_id: Some(payment.prev_user_channel_id),
 						outpoint: payment.prev_funding_outpoint,
 						htlc_id: payment.prev_htlc_id,
 						incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
@@ -3828,6 +3849,7 @@ where
 
 					let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 						short_channel_id: prev_short_channel_id,
+						user_channel_id: Some(prev_user_channel_id),
 						outpoint: prev_funding_outpoint,
 						htlc_id: prev_htlc_id,
 						incoming_packet_shared_secret: incoming_shared_secret,
@@ -3932,7 +3954,7 @@ where
 				for forward_info in pending_forwards.drain(..) {
 					match forward_info {
 						HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
-							prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _,
+							prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
 							forward_info: PendingHTLCInfo {
 								incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
 								routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
@@ -3941,6 +3963,7 @@ where
 							log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
 							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 								short_channel_id: prev_short_channel_id,
+								user_channel_id: Some(prev_user_channel_id),
 								outpoint: prev_funding_outpoint,
 								htlc_id: prev_htlc_id,
 								incoming_packet_shared_secret: incoming_shared_secret,
@@ -4022,6 +4045,7 @@ where
 					let claimable_htlc = ClaimableHTLC {
 						prev_hop: HTLCPreviousHopData {
 							short_channel_id: prev_short_channel_id,
+							user_channel_id: Some(prev_user_channel_id),
 							outpoint: prev_funding_outpoint,
 							htlc_id: prev_htlc_id,
 							incoming_packet_shared_secret: incoming_shared_secret,
@@ -4051,6 +4075,7 @@ where
 							);
 							failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
 									short_channel_id: $htlc.prev_hop.short_channel_id,
+									user_channel_id: $htlc.prev_hop.user_channel_id,
 									outpoint: prev_funding_outpoint,
 									htlc_id: $htlc.prev_hop.htlc_id,
 									incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
@@ -4782,7 +4807,7 @@ where
 					&self.pending_events, &self.logger)
 				{ self.push_pending_forwards_ev(); }
 			},
-			HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
+			HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint, .. }) => {
 				log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error);
 				let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
@@ -4869,9 +4894,11 @@ where
 					}
 				}
 
+				let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
+				let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
 				let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash, ClaimingPayment {
 					amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
-					payment_purpose: payment.purpose, receiver_node_id,
+					payment_purpose: payment.purpose, receiver_node_id, htlcs, sender_intended_value
 				});
 				if dup_purpose.is_some() {
 					debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
@@ -5083,12 +5110,18 @@ where
 		self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
 	}
 
-	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
 				debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
 					"We don't support claim_htlc claims during startup - monitors may not be available yet");
-				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
+				let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+					channel_funding_outpoint: next_channel_outpoint,
+					counterparty_node_id: path.hops[0].pubkey,
+				};
+				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
+					session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
+					&self.logger);
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_outpoint = hop_data.outpoint;
@@ -5104,7 +5137,7 @@ where
 							fee_earned_msat,
 							claim_from_onchain_tx: from_onchain,
 							prev_channel_id: Some(prev_outpoint.to_channel_id()),
-							next_channel_id: Some(next_channel_id),
+							next_channel_id: Some(next_channel_outpoint.to_channel_id()),
 							outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
 						},
 						downstream_counterparty_and_funding_outpoint: None,
@@ -5129,9 +5162,20 @@ where
 			match action {
 				MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
 					let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
-					if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
+					if let Some(ClaimingPayment {
+						amount_msat,
+						payment_purpose: purpose,
+						receiver_node_id,
+						htlcs,
+						sender_intended_value: sender_intended_total_msat,
+					}) = payment {
 						self.pending_events.lock().unwrap().push_back((events::Event::PaymentClaimed {
-							payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
+							payment_hash,
+							purpose,
+							amount_msat,
+							receiver_node_id: Some(receiver_node_id),
+							htlcs,
+							sender_intended_total_msat,
 						}, None));
 					}
 				},
@@ -5879,6 +5923,7 @@ where
 	}
 
 	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
+		let funding_txo;
 		let (htlc_source, forwarded_htlc_value) = {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -5890,12 +5935,14 @@ where
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
-					try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
+					let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
+					funding_txo = chan.get().context.get_funding_txo().expect("We won't accept a fulfill until funded");
+					res
 				},
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
 					counterparty_node_id), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
+		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
 		Ok(())
 	}
@@ -6006,6 +6053,7 @@ where
 							log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
 							let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 								short_channel_id: prev_short_channel_id,
+								user_channel_id: Some(prev_user_channel_id),
 								outpoint: prev_funding_outpoint,
 								htlc_id: prev_htlc_id,
 								incoming_packet_shared_secret: forward_info.incoming_shared_secret,
@@ -6092,10 +6140,18 @@ where
 		let peer_state = &mut *peer_state_lock;
 		match peer_state.channel_by_id.entry(msg.channel_id) {
 			hash_map::Entry::Occupied(mut chan) => {
-				let funding_txo = chan.get().context.get_funding_txo();
-				let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan);
+				let funding_txo_opt = chan.get().context.get_funding_txo();
+				let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
+					self.raa_monitor_updates_held(
+						&peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+						*counterparty_node_id)
+				} else { false };
+				let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
+					chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan);
 				let res = if let Some(monitor_update) = monitor_update_opt {
-					handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+					let funding_txo = funding_txo_opt
+						.expect("Funding outpoint must have been set for RAA handling to succeed");
+					handle_new_monitor_update!(self, funding_txo, monitor_update,
 						peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
 				} else { Ok(()) };
 				(htlcs_to_fail, res)
@@ -6270,7 +6326,7 @@ where
 				MonitorEvent::HTLCEvent(htlc_update) => {
 					if let Some(preimage) = htlc_update.payment_preimage {
 						log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-						self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+						self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
 					} else {
 						log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 						let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@@ -7124,6 +7180,7 @@ where
 					if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
 						let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
 							short_channel_id: htlc.prev_short_channel_id,
+							user_channel_id: Some(htlc.prev_user_channel_id),
 							htlc_id: htlc.prev_htlc_id,
 							incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
 							phantom_shared_secret: None,
@@ -7752,7 +7809,7 @@ impl Writeable for ChannelDetails {
 
 impl Readable for ChannelDetails {
 	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
-		_init_and_read_tlv_fields!(reader, {
+		_init_and_read_len_prefixed_tlv_fields!(reader, {
 			(1, inbound_scid_alias, option),
 			(2, channel_id, required),
 			(3, channel_type, option),
@@ -7939,7 +7996,8 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 	(1, phantom_shared_secret, option),
 	(2, outpoint, required),
 	(4, htlc_id, required),
-	(6, incoming_packet_shared_secret, required)
+	(6, incoming_packet_shared_secret, required),
+	(7, user_channel_id, option),
 });
 
 impl Writeable for ClaimableHTLC {
@@ -7965,7 +8023,7 @@ impl Writeable for ClaimableHTLC {
 
 impl Readable for ClaimableHTLC {
 	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
-		_init_and_read_tlv_fields!(reader, {
+		_init_and_read_len_prefixed_tlv_fields!(reader, {
 			(0, prev_hop, required),
 			(1, total_msat, option),
 			(2, value_ser, required),
@@ -9019,7 +9077,13 @@ where
 							// generating a `PaymentPathSuccessful` event but regenerating
 							// it and the `PaymentSent` on every restart until the
 							// `ChannelMonitor` is removed.
-							pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
+							let compl_action =
+								EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
+									channel_funding_outpoint: monitor.get_funding_txo().0,
+									counterparty_node_id: path.hops[0].pubkey,
+								};
+							pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
+								path, false, compl_action, &pending_events, &args.logger);
 							pending_events_read = pending_events.into_inner().unwrap();
 						}
 					},
@@ -9040,7 +9104,7 @@ where
 							// downstream chan is closed (because we don't have a
 							// channel_id -> peer map entry).
 							counterparty_opt.is_none(),
-							monitor.get_funding_txo().0.to_channel_id()))
+							monitor.get_funding_txo().0))
 					} else { None }
 				} else {
 					// If it was an outbound payment, we've handled it above - if a preimage
@@ -9187,7 +9251,7 @@ where
 						.expect("Failed to get node_id for phantom node recipient");
 					receiver_node_id = Some(phantom_pubkey)
 				}
-				for claimable_htlc in payment.htlcs {
+				for claimable_htlc in &payment.htlcs {
 					claimable_amt_msat += claimable_htlc.value;
 
 					// Add a holding-cell claim of the payment to the Channel, which should be
@@ -9223,6 +9287,8 @@ where
 						payment_hash,
 						purpose: payment.purpose,
 						amount_msat: claimable_amt_msat,
+						htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
+						sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
 					}, None));
 				}
 			}
@@ -9309,12 +9375,12 @@ where
 			channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
 
-		for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay {
+		for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay {
 			// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
 			// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
 			// channel is closed we just assume that it probably came from an on-chain claim.
 			channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
-				downstream_closed, downstream_chan_id);
+				downstream_closed, downstream_funding);
 		}
 
 		//TODO: Broadcast channel update for closed channels, but only after we've made a
@@ -9492,6 +9558,7 @@ mod tests {
 
 		let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 		nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
+		expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
 		check_added_monitors!(nodes[0], 1);
 		let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -9519,16 +9586,8 @@ mod tests {
 		// Note that successful MPP payments will generate a single PaymentSent event upon the first
 		// path's success and a PaymentPathSuccessful event for each path's success.
 		let events = nodes[0].node.get_and_clear_pending_events();
-		assert_eq!(events.len(), 3);
+		assert_eq!(events.len(), 2);
 		match events[0] {
-			Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => {
-				assert_eq!(Some(payment_id), *id);
-				assert_eq!(payment_preimage, *preimage);
-				assert_eq!(our_payment_hash, *hash);
-			},
-			_ => panic!("Unexpected event"),
-		}
-		match events[1] {
 			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
 				assert_eq!(payment_id, *actual_payment_id);
 				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
@@ -9536,7 +9595,7 @@ mod tests {
 			},
 			_ => panic!("Unexpected event"),
 		}
-		match events[2] {
+		match events[1] {
 			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
 				assert_eq!(payment_id, *actual_payment_id);
 				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
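Reviewer note (not part of the patch): the API-visible effect of this diff is that `Event::PaymentClaimed` now carries one `ClaimedHTLC` per HTLC plus the sender-intended total. Below is a minimal sketch of how an event consumer might read the new fields. Only the field names come from the diff above; the handler shape and the printed output are illustrative assumptions, not part of this change.

	use lightning::events::Event;

	// Minimal sketch (assumed handler shape): consume the per-HTLC data that
	// this diff adds to `Event::PaymentClaimed`.
	fn handle_event(event: Event) {
		if let Event::PaymentClaimed { payment_hash, amount_msat, htlcs, sender_intended_total_msat, .. } = event {
			println!("claimed {} msat for payment {:?}", amount_msat, payment_hash);
			for htlc in &htlcs {
				// `user_channel_id` falls back to 0 for HTLCs whose hop data
				// was serialized before this change (see the `unwrap_or(0)`
				// in the `From<&ClaimableHTLC>` impl above).
				println!("  part: {} msat over channel {:?} (user_channel_id {}, cltv expiry {})",
					htlc.value_msat, htlc.channel_id, htlc.user_channel_id, htlc.cltv_expiry);
			}
			// For MPP, each HTLC carries the sender's intended total; this is
			// `None` when read from data written before the field existed.
			if let Some(total_msat) = sender_intended_total_msat {
				println!("  sender intended {} msat in total", total_msat);
			}
		}
	}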