From 2024c5e1049ef399fb24e5c466924f980e949c69 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Fri, 16 Jul 2021 02:16:50 +0000
Subject: [PATCH] Generate a PaymentForwarded event when a forwarded HTLC is
 claimed

For accounting and informational reasons, it is useful for users to be
informed when a payment has been successfully forwarded. Thus, when an
HTLC which represents a forwarded leg is claimed, we now generate a new
`PaymentForwarded` event.

This requires some additional plumbing to return HTLC values from
`OnchainEvent`s. Further, when we have to go on-chain to claim the
inbound side of the payment, we do not inform the user of the fee
earned, as we cannot calculate it until we see what is confirmed
on-chain.

Substantial code structure rewrites by: Valentine Wallace
---
 CHANGELOG.md                                  |  14 +++
 fuzz/src/chanmon_consistency.rs               |   1 +
 fuzz/src/full_stack.rs                        |   4 +-
 lightning/src/chain/channelmonitor.rs         |  31 +++--
 lightning/src/ln/chanmon_update_fail_tests.rs |   2 +
 lightning/src/ln/channel.rs                   |  32 +++--
 lightning/src/ln/channelmanager.rs            | 117 +++++++++++-------
 lightning/src/ln/functional_test_utils.rs     |  16 +++
 lightning/src/ln/functional_tests.rs          |  15 ++-
 lightning/src/ln/reorg_tests.rs               |   5 +-
 lightning/src/util/events.rs                  |  40 ++++++
 11 files changed, 209 insertions(+), 68 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 764e2ffd..dc2ea744 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,17 @@
+# 0.0.100 - WIP
+
+## Serialization Compatibility
+ * HTLCs which were in the process of being claimed on-chain when a pre-0.0.100
+   `ChannelMonitor` was serialized may generate `PaymentForwarded` events with
+   spurious `fee_earned_msat` values. This only applies to payments which were
+   unresolved at the time of the upgrade.
+ * 0.0.100 clients with pending `PaymentForwarded` events at serialization-time
+   will generate serialized `ChannelManager` objects which 0.0.99 and earlier
+   clients cannot read. The likelihood of this can be reduced by ensuring you
+   process all pending events immediately before serialization (as is done by
+   the `lightning-background-processor` crate).
+
+
 # 0.0.99 - 2021-07-09

 ## API Updates
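
A note for integrators: the new event arrives through the same `EventsProvider` interface as every other event, so consuming it is one more pattern-match arm. The sketch below is illustrative and not part of this patch; it assumes polling via `EventsProvider::get_and_clear_pending_events`, and the printouts are stand-ins for whatever bookkeeping a routing node actually does.

    use lightning::util::events::{Event, EventsProvider};

    // Hypothetical accounting hook: drain pending events and record forwarding fees.
    fn record_forwarding_fees<E: EventsProvider>(events_source: &E) {
        for event in events_source.get_and_clear_pending_events() {
            if let Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } = event {
                match fee_earned_msat {
                    // Normal off-chain forward: the fee is known exactly.
                    Some(fee) => println!("forwarded; earned {} msat (on-chain claim: {})", fee, claim_from_onchain_tx),
                    // The inbound HTLC is being claimed on-chain; the fee cannot
                    // be computed until the claim confirms.
                    None => println!("forwarded; fee not yet known"),
                }
            }
        }
    }
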
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index b879ec9b..70ddac5d 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -805,6 +805,7 @@ pub fn do_test<Out: Output>(data: &[u8], out: Out) {
 					},
 					events::Event::PaymentSent { .. } => {},
 					events::Event::PaymentFailed { .. } => {},
+					events::Event::PaymentForwarded { .. } if $node == 1 => {},
 					events::Event::PendingHTLCsForwardable { .. } => {
 						nodes[$node].process_pending_htlc_forwards();
 					},
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 9398dcb0..d1adf06e 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -596,12 +596,10 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
 						//TODO: enhance by fetching random amounts from fuzz input?
 						payments_received.push(payment_hash);
 					},
-					Event::PaymentSent {..} => {},
-					Event::PaymentFailed {..} => {},
 					Event::PendingHTLCsForwardable {..} => {
 						should_forward = true;
 					},
-					Event::SpendableOutputs {..} => {},
+					_ => {},
 				}
 			}
 		}
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 88c3caf5..cec3f41c 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -199,10 +199,12 @@ pub enum MonitorEvent {
 pub struct HTLCUpdate {
 	pub(crate) payment_hash: PaymentHash,
 	pub(crate) payment_preimage: Option<PaymentPreimage>,
-	pub(crate) source: HTLCSource
+	pub(crate) source: HTLCSource,
+	pub(crate) onchain_value_satoshis: Option<u64>,
 }
 impl_writeable_tlv_based!(HTLCUpdate, {
 	(0, payment_hash, required),
+	(1, onchain_value_satoshis, option),
 	(2, source, required),
 	(4, payment_preimage, option),
 });
@@ -385,6 +387,7 @@ enum OnchainEvent {
 	HTLCUpdate {
 		source: HTLCSource,
 		payment_hash: PaymentHash,
+		onchain_value_satoshis: Option<u64>,
 	},
 	MaturingOutput {
 		descriptor: SpendableOutputDescriptor,
 	},
@@ -400,6 +403,7 @@ impl_writeable_tlv_based!(OnchainEventEntry, {
 impl_writeable_tlv_based_enum!(OnchainEvent,
 	(0, HTLCUpdate) => {
 		(0, source, required),
+		(1, onchain_value_satoshis, option),
 		(2, payment_hash, required),
 	},
 	(1, MaturingOutput) => {
@@ -1574,6 +1578,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 							event: OnchainEvent::HTLCUpdate {
 								source: (**source).clone(),
 								payment_hash: htlc.payment_hash.clone(),
+								onchain_value_satoshis: Some(htlc.amount_msat / 1000),
 							},
 						};
 						log_info!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of revoked counterparty commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold());
@@ -1641,6 +1646,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 						event: OnchainEvent::HTLCUpdate {
 							source: (**source).clone(),
 							payment_hash: htlc.payment_hash.clone(),
+							onchain_value_satoshis: Some(htlc.amount_msat / 1000),
 						},
 					});
 				}
@@ -1825,6 +1831,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				height,
 				event: OnchainEvent::HTLCUpdate {
 					source: source.clone(), payment_hash: htlc.payment_hash,
+					onchain_value_satoshis: Some(htlc.amount_msat / 1000)
 				},
 			};
 			log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting confirmation (at height{})",
@@ -2087,7 +2094,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		// Produce actionable events from on-chain events having reached their threshold.
 		for entry in onchain_events_reaching_threshold_conf.drain(..) {
 			match entry.event {
-				OnchainEvent::HTLCUpdate { ref source, payment_hash } => {
+				OnchainEvent::HTLCUpdate { ref source, payment_hash, onchain_value_satoshis } => {
 					// Check for duplicate HTLC resolutions.
 					#[cfg(debug_assertions)]
 					{
@@ -2106,9 +2113,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
 					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
-						payment_hash: payment_hash,
+						payment_hash,
 						payment_preimage: None,
 						source: source.clone(),
+						onchain_value_satoshis,
 					}));
 				},
 				OnchainEvent::MaturingOutput { descriptor } => {
@@ -2325,7 +2333,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 								if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
 									if let &Some(ref source) = pending_source {
 										log_claim!("revoked counterparty commitment tx", false, pending_htlc, true);
-										payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
+										payment_data = Some(((**source).clone(), $htlc_output.payment_hash, $htlc_output.amount_msat));
 										break;
 									}
 								}
@@ -2345,7 +2353,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 							// transaction. This implies we either learned a preimage, the HTLC
 							// has timed out, or we screwed up. In any case, we should now
 							// resolve the source HTLC with the original sender.
-							payment_data = Some(((*source).clone(), htlc_output.payment_hash));
+							payment_data = Some(((*source).clone(), htlc_output.payment_hash, htlc_output.amount_msat));
 						} else if !$holder_tx {
 								check_htlc_valid_counterparty!(self.current_counterparty_commitment_txid, htlc_output);
 							if payment_data.is_none() {
@@ -2378,7 +2386,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {

 		// Check that scan_commitment, above, decided there is some source worth relaying an
 		// HTLC resolution backwards to and figure out whether we learned a preimage from it.
-		if let Some((source, payment_hash)) = payment_data {
+		if let Some((source, payment_hash, amount_msat)) = payment_data {
 			let mut payment_preimage = PaymentPreimage([0; 32]);
 			if accepted_preimage_claim {
 				if !self.pending_monitor_events.iter().any(
@@ -2387,7 +2395,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
 						source,
 						payment_preimage: Some(payment_preimage),
-						payment_hash
+						payment_hash,
+						onchain_value_satoshis: Some(amount_msat / 1000),
 					}));
 				}
 			} else if offered_preimage_claim {
@@ -2399,7 +2408,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
 						source,
 						payment_preimage: Some(payment_preimage),
-						payment_hash
+						payment_hash,
+						onchain_value_satoshis: Some(amount_msat / 1000),
 					}));
 				}
 			} else {
@@ -2415,7 +2425,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				let entry = OnchainEventEntry {
 					txid: tx.txid(),
 					height,
-					event: OnchainEvent::HTLCUpdate { source: source, payment_hash: payment_hash },
+					event: OnchainEvent::HTLCUpdate {
+						source, payment_hash,
+						onchain_value_satoshis: Some(amount_msat / 1000),
+					},
 				};
 				log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
 				self.onchain_events_awaiting_threshold_conf.push(entry);
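
The repeated `htlc.amount_msat / 1000` above is forced by the chain: HTLC outputs are whole satoshis, so a monitor can only ever report a floored value. When `ChannelManager` later multiplies back by 1000 (see `process_pending_monitor_events` further down), up to 999 msat of the downstream HTLC's value vanish, and that remainder surfaces as extra `fee_earned_msat`. A standalone illustration of the round-trip, with assumed example values:

    fn main() {
        // Assume we forwarded 1_000_999 msat downstream and charged a 1_000 msat
        // fee, so the inbound HTLC we claim is worth 1_001_999 msat.
        let inbound_htlc_msat: u64 = 1_001_999;
        // The next hop claims its HTLC on-chain; the monitor stores whole
        // satoshis, mirroring `htlc.amount_msat / 1000` above.
        let onchain_value_satoshis: u64 = 1_000_999 / 1000; // 1_000 sat; 999 msat dropped
        // ChannelManager reconstructs msat from satoshis, mirroring
        // `onchain_value_satoshis.map(|v| v * 1000)`.
        let forwarded_htlc_value_msat = onchain_value_satoshis * 1000; // 1_000_000 msat
        // The reported fee overstates the real 1_000 msat fee by the dropped 999 msat.
        assert_eq!(inbound_htlc_msat - forwarded_htlc_value_msat, 1_999);
    }

This is exactly the effect the `PaymentForwarded` documentation (and the `Some(196*2)` expectation in `test_duplicate_payment_hash_one_failure_one_success` below) describes.
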
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 90519f28..c3a2f5af 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -1159,6 +1159,7 @@ fn test_monitor_update_fail_reestablish() {
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -2317,6 +2318,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
 	}
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);

 	let mut bs_updates = None;
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index e08b8af4..b9a63f1c 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -306,6 +306,7 @@ pub struct CounterpartyForwardingInfo {
 enum UpdateFulfillFetch {
 	NewClaim {
 		monitor_update: ChannelMonitorUpdate,
+		htlc_value_msat: u64,
 		msg: Option<msgs::UpdateFulfillHTLC>,
 	},
 	DuplicateClaim {},
@@ -319,6 +320,8 @@ pub enum UpdateFulfillCommitFetch {
 	NewClaim {
 		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
 		monitor_update: ChannelMonitorUpdate,
+		/// The value of the HTLC which was claimed, in msat.
+		htlc_value_msat: u64,
 		/// The update_fulfill message and commitment_signed message (if the claim was not placed
 		/// in the holding cell).
 		msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
@@ -336,6 +339,9 @@ pub enum UpdateFulfillCommitFetch {
 // Holder designates channel data owned for the benefice of the user client.
 // Counterparty designates channel data owned by the another channel participant entity.
 pub(super) struct Channel<Signer: Sign> {
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) config: ChannelConfig,
+	#[cfg(not(any(test, feature = "_test_utils")))]
 	config: ChannelConfig,

 	user_id: u64,
@@ -1275,6 +1281,7 @@ impl<Signer: Sign> Channel<Signer> {
 		// these, but for now we just have to treat them as normal.

 		let mut pending_idx = core::usize::MAX;
+		let mut htlc_value_msat = 0;
 		for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
 			if htlc.htlc_id == htlc_id_arg {
 				assert_eq!(htlc.payment_hash, payment_hash_calc);
@@ -1294,6 +1301,7 @@ impl<Signer: Sign> Channel<Signer> {
 					}
 				}
 				pending_idx = idx;
+				htlc_value_msat = htlc.amount_msat;
 				break;
 			}
 		}
@@ -1335,7 +1343,7 @@ impl<Signer: Sign> Channel<Signer> {
 					// TODO: We may actually be able to switch to a fulfill here, though its
 					// rare enough it may not be worth the complexity burden.
 					debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-					return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+					return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
 				}
 			},
 			_ => {}
 		}
@@ -1347,7 +1355,7 @@ impl<Signer: Sign> Channel<Signer> {
 			});
 			#[cfg(any(test, feature = "fuzztarget"))]
 			self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
-			return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
 		}
 		#[cfg(any(test, feature = "fuzztarget"))]
 		self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
@@ -1357,7 +1365,7 @@ impl<Signer: Sign> Channel<Signer> {
 		if let InboundHTLCState::Committed = htlc.state {
 		} else {
 			debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-			return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
 		}
 		log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
 		htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
@@ -1365,6 +1373,7 @@ impl<Signer: Sign> Channel<Signer> {

 		UpdateFulfillFetch::NewClaim {
 			monitor_update,
+			htlc_value_msat,
 			msg: Some(msgs::UpdateFulfillHTLC {
 				channel_id: self.channel_id(),
 				htlc_id: htlc_id_arg,
@@ -1375,7 +1384,7 @@ impl<Signer: Sign> Channel<Signer> {

 	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
 		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-			UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
+			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
 				let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
 					Err(e) => return Err((e, monitor_update)),
 					Ok(res) => res
@@ -1384,9 +1393,10 @@ impl<Signer: Sign> Channel<Signer> {
 				// strictly increasing by one, so decrement it here.
 				self.latest_monitor_update_id = monitor_update.update_id;
 				monitor_update.updates.append(&mut additional_update.updates);
-				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
+				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
 			},
-			UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+			UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
+				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
 			UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
 		}
 	}
@@ -2163,7 +2173,7 @@ impl<Signer: Sign> Channel<Signer> {

 	/// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
 	#[inline]
-	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError> {
+	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
 		for htlc in self.pending_outbound_htlcs.iter_mut() {
 			if htlc.htlc_id == htlc_id {
 				match check_preimage {
@@ -2182,13 +2192,13 @@ impl<Signer: Sign> Channel<Signer> {
 					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
 						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
 				}
-				return Ok(&htlc.source);
+				return Ok(htlc);
 			}
 		}
 		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
 	}

-	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError> {
+	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
 		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
 			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
 		}
@@ -2197,7 +2207,7 @@ impl<Signer: Sign> Channel<Signer> {
 		}

 		let payment_hash = PaymentHash(Sha256::hash(&msg.payment_preimage.0[..]).into_inner());
-		self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|source| source.clone())
+		self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
 	}

 	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
@@ -2496,7 +2506,7 @@ impl<Signer: Sign> Channel<Signer> {
 						// in it hitting the holding cell again and we cannot change the state of a
 						// holding cell HTLC from fulfill to anything else.
 						let (update_fulfill_msg_option, mut additional_monitor_update) =
-							if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+							if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
 								(msg, monitor_update)
 							} else { unreachable!() };
 						update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
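
That completes the channel.rs plumbing: every fulfill path now reports the claimed HTLC's value alongside its monitor update. The channelmanager.rs changes below start by retiring `claim_funds_from_hop`'s old `Result<(), Option<(PublicKey, MsgHandleErrInternal)>>` return in favor of a dedicated four-variant enum; the old shape could not distinguish a fresh claim from a duplicate one, and `PaymentForwarded` must fire only for the former. A self-contained restatement of the mapping, using stand-in types rather than the real ones:

    // Stand-ins for the real types in channelmanager.rs.
    type CounterpartyId = [u8; 33];
    struct HandleError;

    enum ClaimFundsFromHop {
        PrevHopForceClosed,                                          // was Err(None)
        MonitorUpdateFail(CounterpartyId, HandleError, Option<u64>), // was Err(Some((pk, err))), now also carries the value
        Success(u64),                                                // was Ok(()), value previously discarded
        DuplicateClaim,                                              // was also Ok(()), previously indistinguishable
    }

    fn should_emit_payment_forwarded(res: &ClaimFundsFromHop) -> bool {
        // Only a non-duplicate claim may produce a PaymentForwarded event.
        !matches!(res, ClaimFundsFromHop::DuplicateClaim)
    }
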
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 6ebe802b..888724c0 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -207,6 +207,14 @@ pub(super) enum HTLCFailReason {
 	}
 }

+/// Return value for claim_funds_from_hop
+enum ClaimFundsFromHop {
+	PrevHopForceClosed,
+	MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
+	Success(u64),
+	DuplicateClaim,
+}
+
 type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);

 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
@@ -2787,16 +2795,16 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 							HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
 				} else {
 					match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
-						Err(Some(e)) => {
-							if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+						ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
+							if let msgs::ErrorAction::IgnoreError = err.err.action {
 								// We got a temporary failure updating monitor, but will claim the
 								// HTLC when the monitor updating is restored (or on chain).
-								log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+								log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
 								claimed_any_htlcs = true;
-							} else { errs.push(e); }
+							} else { errs.push((pk, err)); }
 						},
-						Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
-						Ok(()) => claimed_any_htlcs = true,
+						ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+						_ => claimed_any_htlcs = true,
 					}
 				}
 			}
@@ -2814,28 +2822,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		} else { false }
 	}

-	fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+	fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
 		let channel_state = &mut **channel_state_lock;
 		let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
 			Some(chan_id) => chan_id.clone(),
 			None => {
-				return Err(None)
+				return ClaimFundsFromHop::PrevHopForceClosed
 			}
 		};

 		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
 			match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
 				Ok(msgs_monitor_option) => {
-					if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
+					if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
 						if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
 							log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
 								"Failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, e);
-							return Err(Some((
+							return ClaimFundsFromHop::MonitorUpdateFail(
 								chan.get().get_counterparty_node_id(),
 								handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-							)));
+								Some(htlc_value_msat)
+							);
 						}
 						if let Some((msg, commitment_signed)) = msgs {
 							log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
@@ -2852,8 +2861,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 							}
 						});
 					}
+					return ClaimFundsFromHop::Success(htlc_value_msat);
+				} else {
+					return ClaimFundsFromHop::DuplicateClaim;
 				}
-				return Ok(())
 			},
 			Err((e, monitor_update)) => {
 				if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
@@ -2866,13 +2877,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					if drop {
 						chan.remove_entry();
 					}
-					return Err(Some((counterparty_node_id, res)));
+					return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
 				},
 			}
 		} else { unreachable!(); }
 	}

-	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, .. } => {
 				mem::drop(channel_state_lock);
@@ -2891,29 +2902,51 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_outpoint = hop_data.outpoint;
-				if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
-					Ok(()) => Ok(()),
-					Err(None) => {
-						let preimage_update = ChannelMonitorUpdate {
-							update_id: CLOSED_CHANNEL_UPDATE_ID,
-							updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-								payment_preimage: payment_preimage.clone(),
-							}],
-						};
-						// We update the ChannelMonitor on the backward link, after
-						// receiving an offchain preimage event from the forward link (the
-						// event being update_fulfill_htlc).
-						if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
-							log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-								payment_preimage, e);
-						}
-						Ok(())
-					},
-					Err(Some(res)) => Err(res),
-				} {
-					mem::drop(channel_state_lock);
-					let res: Result<(), _> = Err(err);
-					let _ = handle_error!(self, res, counterparty_node_id);
-				}
+				let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+				let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
+				let htlc_claim_value_msat = match res {
+					ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
+					ClaimFundsFromHop::Success(amt) => Some(amt),
+					_ => None,
+				};
+				if let ClaimFundsFromHop::PrevHopForceClosed = res {
+					let preimage_update = ChannelMonitorUpdate {
+						update_id: CLOSED_CHANNEL_UPDATE_ID,
+						updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+							payment_preimage: payment_preimage.clone(),
+						}],
+					};
+					// We update the ChannelMonitor on the backward link, after
+					// receiving an offchain preimage event from the forward link (the
+					// event being update_fulfill_htlc).
+					if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+						log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+							payment_preimage, e);
+					}
+					// Note that we do *not* set `claimed_htlc` to false here. In fact, this
+					// totally could be a duplicate claim, but we have no way of knowing
+					// without interrogating the `ChannelMonitor` we've provided the above
+					// update to. Instead, we simply document in `PaymentForwarded` that this
+					// can happen.
+				}
+				mem::drop(channel_state_lock);
+				if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+					let result: Result<(), _> = Err(err);
+					let _ = handle_error!(self, result, pk);
+				}
+
+				if claimed_htlc {
+					if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+						let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+							Some(claimed_htlc_value - forwarded_htlc_value)
+						} else { None };
+
+						let mut pending_events = self.pending_events.lock().unwrap();
+						pending_events.push(events::Event::PaymentForwarded {
+							fee_earned_msat,
+							claim_from_onchain_tx: from_onchain,
+						});
+					}
+				}
 			},
 		}
 	}
@@ -3309,7 +3342,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana

 	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
 		let mut channel_lock = self.channel_state.lock().unwrap();
-		let htlc_source = {
+		let (htlc_source, forwarded_htlc_value) = {
 			let channel_state = &mut *channel_lock;
 			match channel_state.by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
@@ -3321,7 +3354,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
 		Ok(())
 	}
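
The rewritten `claim_funds_internal` above is the heart of the feature: the inbound (upstream) HTLC's value comes back from `claim_funds_from_hop`, the outbound (downstream) value arrives as `forwarded_htlc_value_msat`, and the fee is their difference, left as `None` whenever the inbound claim's value is unknown. A condensed, hypothetical restatement of that decision logic, where the outer `Option` is whether an event is pushed at all and the inner one is the event's `fee_earned_msat`:

    fn payment_forwarded_fee(
        claimed_inbound_msat: Option<u64>,    // from ClaimFundsFromHop::Success / MonitorUpdateFail
        forwarded_outbound_msat: Option<u64>, // None if a pre-0.0.100 monitor lacked the value
        claimed_htlc: bool,                   // false only for known-duplicate claims
    ) -> Option<Option<u64>> {
        if !claimed_htlc { return None; }         // duplicate: no event at all
        let forwarded = forwarded_outbound_msat?; // unknown outbound value: no event
        // The event fires; the fee is None if the inbound claim's value is
        // unknown (e.g. the previous hop's channel was force-closed).
        Some(claimed_inbound_msat.map(|claimed| claimed - forwarded))
    }
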
@@ -3688,14 +3721,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	/// Process pending events from the `chain::Watch`, returning whether any events were processed.
 	fn process_pending_monitor_events(&self) -> bool {
 		let mut failed_channels = Vec::new();
-		let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
 		let has_pending_monitor_events = !pending_monitor_events.is_empty();
-		for monitor_event in pending_monitor_events {
+		for monitor_event in pending_monitor_events.drain(..) {
 			match monitor_event {
 				MonitorEvent::HTLCEvent(htlc_update) => {
 					if let Some(preimage) = htlc_update.payment_preimage {
 						log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-						self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+						self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
 					} else {
 						log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 						self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index d1d322bf..bea2990c 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1005,6 +1005,20 @@ macro_rules! expect_payment_sent {
 	}
 }

+macro_rules! expect_payment_forwarded {
+	($node: expr, $expected_fee: expr, $upstream_force_closed: expr) => {
+		let events = $node.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+				assert_eq!(fee_earned_msat, $expected_fee);
+				assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+}
+
 #[cfg(test)]
 macro_rules! expect_payment_failure_chan_update {
 	($node: expr, $scid: expr, $chan_closed: expr) => {
@@ -1169,6 +1183,8 @@ pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, exp
 		($node: expr, $prev_node: expr, $new_msgs: expr) => {
 			{
 				$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
+				let fee = $node.node.channel_state.lock().unwrap().by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap().config.forwarding_fee_base_msat;
+				expect_payment_forwarded!($node, Some(fee as u64), false);
 				check_added_monitors!($node, 1);
 				let new_next_msgs = if $new_msgs {
 					let events = $node.node.get_and_clear_pending_msg_events();
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index be23e522..6dec20b2 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -886,6 +886,7 @@ fn updates_shutdown_wait() {
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);
 	let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -1060,6 +1061,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);
 	let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -2832,6 +2834,12 @@ fn test_htlc_on_chain_success() {
 		assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
 		added_monitors.clear();
 	}
+	let forwarded_events = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(forwarded_events.len(), 2);
+	if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
+	} else { panic!(); }
+	if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
+	} else { panic!(); }
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	{
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -5329,6 +5337,8 @@ fn test_onchain_to_onchain_claim() {
 	// So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 	connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
+	check_added_monitors!(nodes[1], 1);
+	expect_payment_forwarded!(nodes[1], Some(1000), true);
 	{
 		let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		// ChannelMonitor: claim tx
 		assert_eq!(b_txn.len(), 1);
 		check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
 		b_txn.clear();
 	}
-	check_added_monitors!(nodes[1], 1);
 	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 3);
 	check_added_monitors!(nodes[1], 1);
@@ -5487,7 +5496,10 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 	expect_payment_failed!(nodes[0], duplicate_payment_hash, false);

 	// Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C
+	// Note that the fee paid is effectively double as the HTLC value (including the nodes[1] fee
+	// and nodes[2] fee) is rounded down and then claimed in full.
 	mine_transaction(&nodes[1], &htlc_success_txn[0]);
+	expect_payment_forwarded!(nodes[1], Some(196*2), true);
 	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	assert!(updates.update_add_htlcs.is_empty());
 	assert!(updates.update_fail_htlcs.is_empty());
@@ -9146,6 +9158,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);

 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
 	// If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
 	if !go_onchain_before_fulfill && broadcast_alice {
 		let events = nodes[1].node.get_and_clear_pending_msg_events();
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 9946cc24..4e13478d 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -132,8 +132,9 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 		connect_block(&nodes[1], &block);

 		// ChannelManager only polls chain::Watch::release_pending_monitor_events when we
-		// probe it for events, so we probe non-message events here (which should still end up empty):
-		assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
+		// probe it for events, so we probe non-message events here (which should just be the
+		// PaymentForwarded event).
+		expect_payment_forwarded!(nodes[1], Some(1000), true);
 	} else {
 		// Confirm the timeout tx and check that we fail the HTLC backwards
 		let block = Block {
diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs
index 33767010..34b0b331 100644
--- a/lightning/src/util/events.rs
+++ b/lightning/src/util/events.rs
@@ -150,6 +150,27 @@ pub enum Event {
 		/// The outputs which you should store as spendable by you.
 		outputs: Vec<SpendableOutputDescriptor>,
 	},
+	/// This event is generated when a payment has been successfully forwarded through us and a
+	/// forwarding fee earned.
+	PaymentForwarded {
+		/// The fee, in milli-satoshis, which was earned as a result of the payment.
+		///
+		/// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
+		/// was pending, the amount the next hop claimed will have been rounded down to the nearest
+		/// whole satoshi. Thus, the fee calculated here may be higher than expected as we still
+		/// claimed the full value in millisatoshis from the source. In this case,
+		/// `claim_from_onchain_tx` will be set.
+		///
+		/// If the channel which sent us the payment has been force-closed, we will claim the funds
+		/// via an on-chain transaction. In that case we do not yet know the on-chain transaction
+		/// fees which we will spend and will instead set this to `None`. It is possible that
+		/// duplicate `PaymentForwarded` events are generated for the same payment iff
+		/// `fee_earned_msat` is `None`.
+		fee_earned_msat: Option<u64>,
+		/// If this is `true`, the forwarded HTLC was claimed by our counterparty via an on-chain
+		/// transaction.
+		claim_from_onchain_tx: bool,
+	},
 }

 impl Writeable for Event {
@@ -217,6 +238,13 @@ impl Writeable for Event {
 					(0, VecWriteWrapper(outputs), required),
 				});
 			},
+			&Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+				7u8.write(writer)?;
+				write_tlv_fields!(writer, {
+					(0, fee_earned_msat, option),
+					(2, claim_from_onchain_tx, required),
+				});
+			},
 		}
 		Ok(())
 	}
@@ -312,6 +340,18 @@ impl MaybeReadable for Event {
 				};
 				f()
 			},
+			7u8 => {
+				let f = || {
+					let mut fee_earned_msat = None;
+					let mut claim_from_onchain_tx = false;
+					read_tlv_fields!(reader, {
+						(0, fee_earned_msat, option),
+						(2, claim_from_onchain_tx, required),
+					});
+					Ok(Some(Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx }))
+				};
+				f()
+			},
 			// Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
 			x if x % 2 == 1 => Ok(None),
 			_ => Err(msgs::DecodeError::InvalidValue)
-- 
2.30.2
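
A final note on the serialization hunks, tying back to the CHANGELOG entries at the top: LDK's TLV convention follows the BOLT "it's OK to be odd" rule, where odd type numbers may be skipped by readers that do not understand them and even type numbers must be understood. `PaymentForwarded` is written as event type 7 (odd) and the new monitor field as TLV type 1 (odd), so upgraded readers skip gracefully; 0.0.99 readers, as the context line above notes, returned `InvalidValue` on any unknown event type, which is precisely why a `ChannelManager` serialized with a pending `PaymentForwarded` event cannot be read by older clients. A sketch of the rule, not LDK's actual reader:

    fn on_unknown_event_type(event_type: u8) -> Result<Option<()>, &'static str> {
        if event_type % 2 == 1 {
            Ok(None) // odd: optional, skip it and keep reading (0.0.100 behavior)
        } else {
            Err("InvalidValue") // even: required, the read must fail
        }
    }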