From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Date: Wed, 4 Aug 2021 22:58:14 +0000 (+0000)
Subject: Merge pull request #1004 from TheBlueMatt/2021-07-forward-event
X-Git-Tag: v0.0.100~10
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=69ee4860848d5992b238eca3343141004d9d1572;hp=09e167019589dcfc5ee9675ad243b337659eafc7;p=rust-lightning

Merge pull request #1004 from TheBlueMatt/2021-07-forward-event

Add a `PaymentForwarded` Event
---

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 764e2ffd..dc2ea744 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,17 @@
+# 0.0.100 - WIP
+
+## Serialization Compatibility
+ * HTLCs which were in the process of being claimed on-chain when a pre-0.0.100
+   `ChannelMonitor` was serialized may generate `PaymentForwarded` events with
+   spurious `fee_earned_msat` values. This only applies to payments which were
+   unresolved at the time of the upgrade.
+ * 0.0.100 clients with pending `PaymentForwarded` events at serialization-time
+   will generate serialized `ChannelManager` objects which 0.0.99 and earlier
+   clients cannot read. The likelihood of this can be reduced by ensuring you
+   process all pending events immediately before serialization (as is done by
+   the `lightning-background-processor` crate).
+
+
 # 0.0.99 - 2021-07-09

 ## API Updates
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index b879ec9b..70ddac5d 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -805,6 +805,7 @@ pub fn do_test<Out: test_logger::Output>(data: &[u8], out: Out) {
 					},
 					events::Event::PaymentSent { .. } => {},
 					events::Event::PaymentFailed { .. } => {},
+					events::Event::PaymentForwarded { .. } if $node == 1 => {},
 					events::Event::PendingHTLCsForwardable { .. } => {
 						nodes[$node].process_pending_htlc_forwards();
 					},
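The second compatibility note above is purely about ordering: events drained and handled before the `ChannelManager` is written out never appear in the serialized state. A minimal sketch of that ordering, using the real `EventsProvider::get_and_clear_pending_events` but hypothetical `handle_event`/`persist_manager` application helpers (this is what the `lightning-background-processor` crate automates):

	// Sketch only: `handle_event` and `persist_manager` are hypothetical
	// stand-ins for application code. Draining events first means no pending
	// `PaymentForwarded` event is serialized into the `ChannelManager`,
	// keeping the written bytes readable by 0.0.99 and earlier.
	for event in channel_manager.get_and_clear_pending_events() {
		handle_event(event);
	}
	persist_manager(&channel_manager);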
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 9398dcb0..d1adf06e 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -596,12 +596,10 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
 					//TODO: enhance by fetching random amounts from fuzz input?
 					payments_received.push(payment_hash);
 				},
-				Event::PaymentSent {..} => {},
-				Event::PaymentFailed {..} => {},
 				Event::PendingHTLCsForwardable {..} => {
 					should_forward = true;
 				},
-				Event::SpendableOutputs {..} => {},
+				_ => {},
 			}
 		}
 	}
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 6e2ac3fc..7904d9bd 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -199,10 +199,12 @@ pub enum MonitorEvent {
 pub struct HTLCUpdate {
 	pub(crate) payment_hash: PaymentHash,
 	pub(crate) payment_preimage: Option<PaymentPreimage>,
-	pub(crate) source: HTLCSource
+	pub(crate) source: HTLCSource,
+	pub(crate) onchain_value_satoshis: Option<u64>,
 }
 impl_writeable_tlv_based!(HTLCUpdate, {
 	(0, payment_hash, required),
+	(1, onchain_value_satoshis, option),
 	(2, source, required),
 	(4, payment_preimage, option),
 });
@@ -385,6 +387,7 @@ enum OnchainEvent {
 	HTLCUpdate {
 		source: HTLCSource,
 		payment_hash: PaymentHash,
+		onchain_value_satoshis: Option<u64>,
 	},
 	MaturingOutput {
 		descriptor: SpendableOutputDescriptor,
@@ -400,6 +403,7 @@ impl_writeable_tlv_based!(OnchainEventEntry, {
 impl_writeable_tlv_based_enum!(OnchainEvent,
 	(0, HTLCUpdate) => {
 		(0, source, required),
+		(1, onchain_value_satoshis, option),
 		(2, payment_hash, required),
 	},
 	(1, MaturingOutput) => {
@@ -1574,6 +1578,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 							event: OnchainEvent::HTLCUpdate {
 								source: (**source).clone(),
 								payment_hash: htlc.payment_hash.clone(),
+								onchain_value_satoshis: Some(htlc.amount_msat / 1000),
 							},
 						};
 						log_info!(logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of revoked counterparty commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold());
@@ -1641,6 +1646,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 							event: OnchainEvent::HTLCUpdate {
 								source: (**source).clone(),
 								payment_hash: htlc.payment_hash.clone(),
+								onchain_value_satoshis: Some(htlc.amount_msat / 1000),
 							},
 						});
 					}
@@ -1779,27 +1785,6 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		let mut claim_requests = Vec::new();
 		let mut watch_outputs = Vec::new();

-		macro_rules! wait_threshold_conf {
-			($source: expr, $commitment_tx: expr, $payment_hash: expr) => {
-				self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
-					if entry.height != height { return true; }
-					match entry.event {
-						OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
-							*update_source != $source
-						},
-						_ => true,
-					}
-				});
-				let entry = OnchainEventEntry {
-					txid: commitment_txid,
-					height,
-					event: OnchainEvent::HTLCUpdate { source: $source, payment_hash: $payment_hash },
-				};
-				log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting confirmation (at height{})", log_bytes!($payment_hash.0), $commitment_tx, entry.confirmation_threshold());
-				self.onchain_events_awaiting_threshold_conf.push(entry);
-			}
-		}
-
 		macro_rules! append_onchain_update {
 			($updates: expr, $to_watch: expr) => {
 				claim_requests = $updates.0;
@@ -1828,11 +1813,30 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		}

 		macro_rules! fail_dust_htlcs_after_threshold_conf {
-			($holder_tx: expr) => {
+			($holder_tx: expr, $commitment_tx: expr) => {
 				for &(ref htlc, _, ref source) in &$holder_tx.htlc_outputs {
 					if htlc.transaction_output_index.is_none() {
 						if let &Some(ref source) = source {
-							wait_threshold_conf!(source.clone(), "lastest", htlc.payment_hash.clone());
+							self.onchain_events_awaiting_threshold_conf.retain(|ref entry| {
+								if entry.height != height { return true; }
+								match entry.event {
+									OnchainEvent::HTLCUpdate { source: ref update_source, .. } => {
+										update_source != source
+									},
+									_ => true,
+								}
+							});
+							let entry = OnchainEventEntry {
+								txid: commitment_txid,
+								height,
+								event: OnchainEvent::HTLCUpdate {
+									source: source.clone(), payment_hash: htlc.payment_hash,
+									onchain_value_satoshis: Some(htlc.amount_msat / 1000)
+								},
+							};
+							log_trace!(logger, "Failing HTLC with payment_hash {} from {} holder commitment tx due to broadcast of transaction, waiting confirmation (at height{})",
+								log_bytes!(htlc.payment_hash.0), $commitment_tx, entry.confirmation_threshold());
+							self.onchain_events_awaiting_threshold_conf.push(entry);
 						}
 					}
 				}
@@ -1840,9 +1844,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		}

 		if is_holder_tx {
-			fail_dust_htlcs_after_threshold_conf!(self.current_holder_commitment_tx);
+			fail_dust_htlcs_after_threshold_conf!(self.current_holder_commitment_tx, "latest");
 			if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
-				fail_dust_htlcs_after_threshold_conf!(holder_tx);
+				fail_dust_htlcs_after_threshold_conf!(holder_tx, "previous");
 			}
 		}
@@ -2090,7 +2094,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		// Produce actionable events from on-chain events having reached their threshold.
 		for entry in onchain_events_reaching_threshold_conf.drain(..) {
 			match entry.event {
-				OnchainEvent::HTLCUpdate { ref source, payment_hash } => {
+				OnchainEvent::HTLCUpdate { ref source, payment_hash, onchain_value_satoshis } => {
 					// Check for duplicate HTLC resolutions.
 					#[cfg(debug_assertions)]
 					{
@@ -2109,9 +2113,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
 					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
-						payment_hash: payment_hash,
+						payment_hash,
 						payment_preimage: None,
 						source: source.clone(),
+						onchain_value_satoshis,
 					}));
 				},
 				OnchainEvent::MaturingOutput { descriptor } => {
@@ -2328,7 +2333,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
 						if let &Some(ref source) = pending_source {
 							log_claim!("revoked counterparty commitment tx", false, pending_htlc, true);
-							payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
+							payment_data = Some(((**source).clone(), $htlc_output.payment_hash, $htlc_output.amount_msat));
 							break;
 						}
 					}
@@ -2348,7 +2353,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 							// transaction. This implies we either learned a preimage, the HTLC
 							// has timed out, or we screwed up. In any case, we should now
 							// resolve the source HTLC with the original sender.
-							payment_data = Some(((*source).clone(), htlc_output.payment_hash));
+							payment_data = Some(((*source).clone(), htlc_output.payment_hash, htlc_output.amount_msat));
 						} else if !$holder_tx {
 							check_htlc_valid_counterparty!(self.current_counterparty_commitment_txid, htlc_output);
 							if payment_data.is_none() {
@@ -2381,7 +2386,7 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		// Check that scan_commitment, above, decided there is some source worth relaying an
 		// HTLC resolution backwards to and figure out whether we learned a preimage from it.
-		if let Some((source, payment_hash)) = payment_data {
+		if let Some((source, payment_hash, amount_msat)) = payment_data {
 			let mut payment_preimage = PaymentPreimage([0; 32]);
 			if accepted_preimage_claim {
 				if !self.pending_monitor_events.iter().any(
@@ -2390,7 +2395,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
 						source,
 						payment_preimage: Some(payment_preimage),
-						payment_hash
+						payment_hash,
+						onchain_value_satoshis: Some(amount_msat / 1000),
 					}));
 				}
 			} else if offered_preimage_claim {
@@ -2402,7 +2408,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
 						source,
 						payment_preimage: Some(payment_preimage),
-						payment_hash
+						payment_hash,
+						onchain_value_satoshis: Some(amount_msat / 1000),
 					}));
 				}
 			} else {
@@ -2418,7 +2425,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				let entry = OnchainEventEntry {
 					txid: tx.txid(),
 					height,
-					event: OnchainEvent::HTLCUpdate { source: source, payment_hash: payment_hash },
+					event: OnchainEvent::HTLCUpdate {
+						source, payment_hash,
+						onchain_value_satoshis: Some(amount_msat / 1000),
+					},
 				};
 				log_info!(logger, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height {})", log_bytes!(payment_hash.0), entry.confirmation_threshold());
 				self.onchain_events_awaiting_threshold_conf.push(entry);
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 00b68134..720190ec 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -1160,6 +1160,7 @@ fn test_monitor_update_fail_reestablish() {
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -2318,6 +2319,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
 	}
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);

 	let mut bs_updates = None;
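The repeated `Some(htlc.amount_msat / 1000)` in the `ChannelMonitor` changes above records on-chain HTLC values in whole satoshis, so any sub-satoshi remainder is truncated before the value is later scaled back to msat. A small standalone illustration of that truncation (values assumed for the example):

	fn main() {
		let amount_msat: u64 = 1_999_999; // assumed HTLC value
		let onchain_value_satoshis = amount_msat / 1000; // 1_999 sat recorded
		let recovered_msat = onchain_value_satoshis * 1000; // 1_999_000 msat
		assert_eq!(amount_msat - recovered_msat, 999); // remainder is lost
	}

This rounding is why the `PaymentForwarded` documentation added in events.rs below warns that fees computed for on-chain claims may read higher than expected.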
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 10641d0a..f80377b7 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -307,6 +307,7 @@ pub struct CounterpartyForwardingInfo {
 enum UpdateFulfillFetch {
 	NewClaim {
 		monitor_update: ChannelMonitorUpdate,
+		htlc_value_msat: u64,
 		msg: Option<msgs::UpdateFulfillHTLC>,
 	},
 	DuplicateClaim {},
@@ -320,6 +321,8 @@ pub enum UpdateFulfillCommitFetch {
 	NewClaim {
 		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
 		monitor_update: ChannelMonitorUpdate,
+		/// The value of the HTLC which was claimed, in msat.
+		htlc_value_msat: u64,
 		/// The update_fulfill message and commitment_signed message (if the claim was not placed
 		/// in the holding cell).
 		msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
@@ -337,6 +340,9 @@ pub enum UpdateFulfillCommitFetch {
 // Holder designates channel data owned for the benefice of the user client.
 // Counterparty designates channel data owned by the another channel participant entity.
 pub(super) struct Channel<Signer: Sign> {
+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) config: ChannelConfig,
+	#[cfg(not(any(test, feature = "_test_utils")))]
 	config: ChannelConfig,

 	user_id: u64,
@@ -1276,6 +1282,7 @@ impl<Signer: Sign> Channel<Signer> {
 		// these, but for now we just have to treat them as normal.

 		let mut pending_idx = core::usize::MAX;
+		let mut htlc_value_msat = 0;
 		for (idx, htlc) in self.pending_inbound_htlcs.iter().enumerate() {
 			if htlc.htlc_id == htlc_id_arg {
 				assert_eq!(htlc.payment_hash, payment_hash_calc);
@@ -1295,6 +1302,7 @@ impl<Signer: Sign> Channel<Signer> {
 					}
 				}
 				pending_idx = idx;
+				htlc_value_msat = htlc.amount_msat;
 				break;
 			}
 		}
@@ -1336,7 +1344,7 @@ impl<Signer: Sign> Channel<Signer> {
 						// TODO: We may actually be able to switch to a fulfill here, though its
 						// rare enough it may not be worth the complexity burden.
 						debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-						return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+						return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
 					}
 				},
 				_ => {}
@@ -1348,7 +1356,7 @@ impl<Signer: Sign> Channel<Signer> {
 			});
 			#[cfg(any(test, feature = "fuzztarget"))]
 			self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
-			return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
 		}
 		#[cfg(any(test, feature = "fuzztarget"))]
 		self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
@@ -1358,7 +1366,7 @@ impl<Signer: Sign> Channel<Signer> {
 		if let InboundHTLCState::Committed = htlc.state {
 		} else {
 			debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-			return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
+			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
 		}
 		log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
 		htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
@@ -1366,6 +1374,7 @@ impl<Signer: Sign> Channel<Signer> {
 		UpdateFulfillFetch::NewClaim {
 			monitor_update,
+			htlc_value_msat,
 			msg: Some(msgs::UpdateFulfillHTLC {
 				channel_id: self.channel_id(),
 				htlc_id: htlc_id_arg,
@@ -1376,7 +1385,7 @@ impl<Signer: Sign> Channel<Signer> {
 	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
 		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-			UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
+			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
 				let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
 					Err(e) => return Err((e, monitor_update)),
 					Ok(res) => res
@@ -1385,9 +1394,10 @@ impl<Signer: Sign> Channel<Signer> {
 				// strictly increasing by one, so decrement it here.
 				self.latest_monitor_update_id = monitor_update.update_id;
 				monitor_update.updates.append(&mut additional_update.updates);
-				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
+				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
 			},
-			UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+			UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
+				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
 			UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
 		}
 	}
@@ -2164,7 +2174,7 @@ impl<Signer: Sign> Channel<Signer> {
 	/// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
 	#[inline]
-	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&HTLCSource, ChannelError> {
+	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentHash>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
 		for htlc in self.pending_outbound_htlcs.iter_mut() {
 			if htlc.htlc_id == htlc_id {
 				match check_preimage {
@@ -2183,13 +2193,13 @@ impl<Signer: Sign> Channel<Signer> {
 					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
 						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
 				}
-				return Ok(&htlc.source);
+				return Ok(htlc);
 			}
 		}
 		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
 	}

-	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<HTLCSource, ChannelError> {
+	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
 		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
 			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
 		}
@@ -2198,7 +2208,7 @@ impl<Signer: Sign> Channel<Signer> {
 		}

 		let payment_hash = PaymentHash(Sha256::hash(&msg.payment_preimage.0[..]).into_inner());
-		self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|source| source.clone())
+		self.mark_outbound_htlc_removed(msg.htlc_id, Some(payment_hash), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
 	}

 	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
@@ -2497,7 +2507,7 @@ impl<Signer: Sign> Channel<Signer> {
 					// in it hitting the holding cell again and we cannot change the state of a
 					// holding cell HTLC from fulfill to anything else.
 					let (update_fulfill_msg_option, mut additional_monitor_update) =
-						if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+						if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
 							(msg, monitor_update)
 						} else { unreachable!() };
 					update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
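With these changes `Channel` reports the claimed (inbound) HTLC's value alongside the monitor update; the `ChannelManager` changes that follow subtract the forwarded (outbound) value from it to produce the fee. A hypothetical helper mirroring that arithmetic (the real code below computes `claimed_htlc_value - forwarded_htlc_value` inline):

	// Assumed names, for illustration only: inbound value minus outbound
	// value is the forwarding fee earned.
	fn fee_earned_msat(claimed_htlc_value_msat: u64, forwarded_htlc_value_msat: u64) -> Option<u64> {
		claimed_htlc_value_msat.checked_sub(forwarded_htlc_value_msat)
	}

	fn main() {
		// e.g. 10_000 msat claimed upstream vs 9_000 msat forwarded on.
		assert_eq!(fee_earned_msat(10_000, 9_000), Some(1_000));
	}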
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7e42705c..afbfd0ee 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -208,6 +208,14 @@ pub(super) enum HTLCFailReason {
 	}
 }

+/// Return value for claim_funds_from_hop
+enum ClaimFundsFromHop {
+	PrevHopForceClosed,
+	MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
+	Success(u64),
+	DuplicateClaim,
+}
+
 type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);

 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
@@ -2788,16 +2796,22 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
 			} else {
 				match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
-					Err(Some(e)) => {
-						if let msgs::ErrorAction::IgnoreError = e.1.err.action {
+					ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
+						if let msgs::ErrorAction::IgnoreError = err.err.action {
 							// We got a temporary failure updating monitor, but will claim the
 							// HTLC when the monitor updating is restored (or on chain).
-							log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", e.1.err.err);
+							log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
 							claimed_any_htlcs = true;
-						} else { errs.push(e); }
+						} else { errs.push((pk, err)); }
+					},
+					ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
+					ClaimFundsFromHop::DuplicateClaim => {
+						// While we should never get here in most cases, if we do, it likely
+						// indicates that the HTLC was timed out some time ago and is no longer
+						// available to be claimed. Thus, it does not make sense to set
+						// `claimed_any_htlcs`.
 					},
-					Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
-					Ok(()) => claimed_any_htlcs = true,
+					ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
 				}
 			}
 		}
@@ -2815,28 +2829,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 		} else { false }
 	}

-	fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+	fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
 		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
 		let channel_state = &mut **channel_state_lock;
 		let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
 			Some(chan_id) => chan_id.clone(),
 			None => {
-				return Err(None)
+				return ClaimFundsFromHop::PrevHopForceClosed
 			}
 		};

 		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
 			match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
 				Ok(msgs_monitor_option) => {
-					if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
+					if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
 						if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
 							log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
 								"Failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, e);
-							return Err(Some((
+							return ClaimFundsFromHop::MonitorUpdateFail(
 								chan.get().get_counterparty_node_id(),
 								handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
-							)));
+								Some(htlc_value_msat)
+							);
 						}
 						if let Some((msg, commitment_signed)) = msgs {
 							log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
@@ -2853,8 +2868,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 								}
 							});
 						}
+						return ClaimFundsFromHop::Success(htlc_value_msat);
+					} else {
+						return ClaimFundsFromHop::DuplicateClaim;
 					}
-					return Ok(())
 				},
 				Err((e, monitor_update)) => {
 					if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
@@ -2867,13 +2884,13 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 					if drop {
 						chan.remove_entry();
 					}
-					return Err(Some((counterparty_node_id, res)));
+					return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
 				},
 			}
 		} else { unreachable!(); }
 	}

-	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+	fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool) {
 		match source {
 			HTLCSource::OutboundRoute { session_priv, .. } => {
 				mem::drop(channel_state_lock);
@@ -2892,29 +2909,51 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 			},
 			HTLCSource::PreviousHopData(hop_data) => {
 				let prev_outpoint = hop_data.outpoint;
-				if let Err((counterparty_node_id, err)) = match self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage) {
-					Ok(()) => Ok(()),
-					Err(None) => {
-						let preimage_update = ChannelMonitorUpdate {
-							update_id: CLOSED_CHANNEL_UPDATE_ID,
-							updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
-								payment_preimage: payment_preimage.clone(),
-							}],
-						};
-						// We update the ChannelMonitor on the backward link, after
-						// receiving an offchain preimage event from the forward link (the
-						// event being update_fulfill_htlc).
-						if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
-							log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
-								payment_preimage, e);
-						}
-						Ok(())
-					},
-					Err(Some(res)) => Err(res),
-				} {
-					mem::drop(channel_state_lock);
-					let res: Result<(), _> = Err(err);
-					let _ = handle_error!(self, res, counterparty_node_id);
-				}
+				let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
+				let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
+				let htlc_claim_value_msat = match res {
+					ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
+					ClaimFundsFromHop::Success(amt) => Some(amt),
+					_ => None,
+				};
+				if let ClaimFundsFromHop::PrevHopForceClosed = res {
+					let preimage_update = ChannelMonitorUpdate {
+						update_id: CLOSED_CHANNEL_UPDATE_ID,
+						updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+							payment_preimage: payment_preimage.clone(),
+						}],
+					};
+					// We update the ChannelMonitor on the backward link, after
+					// receiving an offchain preimage event from the forward link (the
+					// event being update_fulfill_htlc).
+					if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
+						log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
+							payment_preimage, e);
+					}
+					// Note that we do *not* set `claimed_htlc` to false here. In fact, this
+					// totally could be a duplicate claim, but we have no way of knowing
+					// without interrogating the `ChannelMonitor` we've provided the above
+					// update to. Instead, we simply document in `PaymentForwarded` that this
+					// can happen.
+				}
+				mem::drop(channel_state_lock);
+				if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
+					let result: Result<(), _> = Err(err);
+					let _ = handle_error!(self, result, pk);
+				}
+
+				if claimed_htlc {
+					if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+						let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+							Some(claimed_htlc_value - forwarded_htlc_value)
+						} else { None };
+
+						let mut pending_events = self.pending_events.lock().unwrap();
+						pending_events.push(events::Event::PaymentForwarded {
+							fee_earned_msat,
+							claim_from_onchain_tx: from_onchain,
+						});
+					}
+				}
 			},
 		}
 	}
@@ -3310,7 +3349,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
 		let mut channel_lock = self.channel_state.lock().unwrap();
-		let htlc_source = {
+		let (htlc_source, forwarded_htlc_value) = {
 			let channel_state = &mut *channel_lock;
 			match channel_state.by_id.entry(msg.channel_id) {
 				hash_map::Entry::Occupied(mut chan) => {
@@ -3322,7 +3361,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 			}
 		};
-		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+		self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false);
 		Ok(())
 	}
@@ -3689,14 +3728,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 	/// Process pending events from the `chain::Watch`, returning whether any events were processed.
 	fn process_pending_monitor_events(&self) -> bool {
 		let mut failed_channels = Vec::new();
-		let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
 		let has_pending_monitor_events = !pending_monitor_events.is_empty();
-		for monitor_event in pending_monitor_events {
+		for monitor_event in pending_monitor_events.drain(..) {
 			match monitor_event {
 				MonitorEvent::HTLCEvent(htlc_update) => {
 					if let Some(preimage) = htlc_update.payment_preimage {
 						log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
-						self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+						self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true);
 					} else {
 						log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 						self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
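On the monitor-event path just above, the on-chain value arrives in whole satoshis and is scaled back to msat with `.map(|v| v * 1000)` before `claim_funds_internal` runs, with `from_onchain` set to `true`. A standalone sketch of that conversion (values assumed):

	fn main() {
		let onchain_value_satoshis: Option<u64> = Some(1_999); // from the ChannelMonitor
		let forwarded_htlc_value_msat = onchain_value_satoshis.map(|v| v * 1000);
		assert_eq!(forwarded_htlc_value_msat, Some(1_999_000));
		// For an HTLCUpdate deserialized from a pre-0.0.100 monitor the stored
		// value is absent, so `None` flows through and the claim path shown
		// above pushes no `PaymentForwarded` event for that claim.
	}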
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index feeb691b..807bb20f 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1006,6 +1006,20 @@ macro_rules! expect_payment_sent {
 	}
 }

+macro_rules! expect_payment_forwarded {
+	($node: expr, $expected_fee: expr, $upstream_force_closed: expr) => {
+		let events = $node.node.get_and_clear_pending_events();
+		assert_eq!(events.len(), 1);
+		match events[0] {
+			Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+				assert_eq!(fee_earned_msat, $expected_fee);
+				assert_eq!(claim_from_onchain_tx, $upstream_force_closed);
+			},
+			_ => panic!("Unexpected event"),
+		}
+	}
+}
+
 #[cfg(test)]
 macro_rules! expect_payment_failure_chan_update {
 	($node: expr, $scid: expr, $chan_closed: expr) => {
@@ -1170,6 +1184,8 @@ pub fn claim_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, exp
 		($node: expr, $prev_node: expr, $new_msgs: expr) => {
 			{
 				$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
+				let fee = $node.node.channel_state.lock().unwrap().by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap().config.forwarding_fee_base_msat;
+				expect_payment_forwarded!($node, Some(fee as u64), false);
 				check_added_monitors!($node, 1);
 				let new_next_msgs = if $new_msgs {
 					let events = $node.node.get_and_clear_pending_msg_events();
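The `expect_payment_forwarded!` macro above asserts exactly one pending event, with the expected fee and claim path; the test updates that follow all use it in one of two shapes (taken verbatim from the hunks below):

	// nodes[1] forwarded a payment for a 1000 msat fee, claimed off-chain:
	expect_payment_forwarded!(nodes[1], Some(1000), false);
	// forwarded, but the HTLC was claimed via an on-chain transaction:
	expect_payment_forwarded!(nodes[1], Some(1000), true);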
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 7e9eb4c9..6e5395e6 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -887,6 +887,7 @@ fn updates_shutdown_wait() {
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);
 	let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -1061,6 +1062,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
 	assert!(updates.update_fee.is_none());
 	assert_eq!(updates.update_fulfill_htlcs.len(), 1);
 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], Some(1000), false);
 	check_added_monitors!(nodes[1], 1);
 	let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
@@ -2833,6 +2835,12 @@ fn test_htlc_on_chain_success() {
 		assert_eq!(added_monitors[0].0.txid, chan_2.3.txid());
 		added_monitors.clear();
 	}
+	let forwarded_events = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(forwarded_events.len(), 2);
+	if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[0] {
+	} else { panic!(); }
+	if let Event::PaymentForwarded { fee_earned_msat: Some(1000), claim_from_onchain_tx: true } = forwarded_events[1] {
+	} else { panic!(); }
 	let events = nodes[1].node.get_and_clear_pending_msg_events();
 	{
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
@@ -5330,19 +5338,15 @@ fn test_onchain_to_onchain_claim() {
 	// So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 	connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
-	connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
+	check_added_monitors!(nodes[1], 1);
+	expect_payment_forwarded!(nodes[1], Some(1000), true);
 	{
 		let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		// ChannelMonitor: claim tx, ChannelManager: local commitment tx
-		assert_eq!(b_txn.len(), 2);
+		// ChannelMonitor: claim tx
+		assert_eq!(b_txn.len(), 1);
 		check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
-		check_spends!(b_txn[1], c_txn[1]); // timeout tx on C remote commitment tx, issued by ChannelMonitor
-		assert_eq!(b_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-		assert!(b_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
-		assert_ne!(b_txn[1].lock_time, 0); // Timeout tx
 		b_txn.clear();
 	}
-	check_added_monitors!(nodes[1], 1);
 	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 3);
 	check_added_monitors!(nodes[1], 1);
@@ -5368,19 +5372,14 @@ fn test_onchain_to_onchain_claim() {
 	let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
 	mine_transaction(&nodes[1], &commitment_tx[0]);
 	let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-	// ChannelMonitor: HTLC-Success tx + HTLC-Timeout RBF Bump, ChannelManager: local commitment tx + HTLC-Success tx
-	assert_eq!(b_txn.len(), 4);
-	check_spends!(b_txn[2], chan_1.3);
-	check_spends!(b_txn[3], b_txn[2]);
-	let (htlc_success_claim, htlc_timeout_bumped) =
-		if b_txn[0].input[0].previous_output.txid == commitment_tx[0].txid()
-			{ (&b_txn[0], &b_txn[1]) } else { (&b_txn[1], &b_txn[0]) };
-	check_spends!(htlc_success_claim, commitment_tx[0]);
-	assert_eq!(htlc_success_claim.input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
-	assert!(htlc_success_claim.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
-	assert_eq!(htlc_success_claim.lock_time, 0); // Success tx
-	check_spends!(htlc_timeout_bumped, c_txn[1]); // timeout tx on C remote commitment tx, issued by ChannelMonitor
-	assert_ne!(htlc_timeout_bumped.lock_time, 0); // Success tx
+	// ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
+	assert_eq!(b_txn.len(), 3);
+	check_spends!(b_txn[1], chan_1.3);
+	check_spends!(b_txn[2], b_txn[1]);
+	check_spends!(b_txn[0], commitment_tx[0]);
+	assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+	assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+	assert_eq!(b_txn[0].lock_time, 0); // Success tx

 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
@@ -5498,7 +5497,10 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
 	expect_payment_failed!(nodes[0], duplicate_payment_hash, false);

 	// Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C
+	// Note that the fee paid is effectively double as the HTLC value (including the nodes[1] fee
+	// and nodes[2] fee) is rounded down and then claimed in full.
 	mine_transaction(&nodes[1], &htlc_success_txn[0]);
+	expect_payment_forwarded!(nodes[1], Some(196*2), true);
 	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 	assert!(updates.update_add_htlcs.is_empty());
 	assert!(updates.update_fail_htlcs.is_empty());
@@ -9157,6 +9159,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	assert_eq!(carol_updates.update_fulfill_htlcs.len(), 1);

 	nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &carol_updates.update_fulfill_htlcs[0]);
+	expect_payment_forwarded!(nodes[1], if go_onchain_before_fulfill || force_closing_node == 1 { None } else { Some(1000) }, false);
 	// If Alice broadcasted but Bob doesn't know yet, here he prepares to tell her about the preimage.
 	if !go_onchain_before_fulfill && broadcast_alice {
 		let events = nodes[1].node.get_and_clear_pending_msg_events();
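The reorg test below depends on a detail its updated comment calls out: `ChannelManager` only pulls events from `chain::Watch::release_pending_monitor_events` when it is itself probed. `expect_payment_forwarded!` performs that probe via `get_and_clear_pending_events`, which is why the assertion in the hunk doubles as the trigger that releases the `PaymentForwarded` event:

	// Without some probe like this, the event generated by the on-chain claim
	// would sit unreleased inside the chain::Watch implementation.
	let events = channel_manager.get_and_clear_pending_events();

(`channel_manager` here is an assumed handle; in the tests it is `nodes[1].node`.)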
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 7845c089..bbdb5bfa 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -136,8 +136,9 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) {
 		connect_block(&nodes[1], &block);

 		// ChannelManager only polls chain::Watch::release_pending_monitor_events when we
-		// probe it for events, so we probe non-message events here (which should still end up empty):
-		assert_eq!(nodes[1].node.get_and_clear_pending_events().len(), 0);
+		// probe it for events, so we probe non-message events here (which should just be the
+		// PaymentForwarded event).
+		expect_payment_forwarded!(nodes[1], Some(1000), true);
 	} else {
 		// Confirm the timeout tx and check that we fail the HTLC backwards
 		let block = Block {
diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs
index 74490eb3..1780483c 100644
--- a/lightning/src/util/events.rs
+++ b/lightning/src/util/events.rs
@@ -151,6 +151,27 @@ pub enum Event {
 		/// The outputs which you should store as spendable by you.
 		outputs: Vec<SpendableOutputDescriptor>,
 	},
+	/// This event is generated when a payment has been successfully forwarded through us and a
+	/// forwarding fee earned.
+	PaymentForwarded {
+		/// The fee, in milli-satoshis, which was earned as a result of the payment.
+		///
+		/// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
+		/// was pending, the amount the next hop claimed will have been rounded down to the nearest
+		/// whole satoshi. Thus, the fee calculated here may be higher than expected as we still
+		/// claimed the full value in millisatoshis from the source. In this case,
+		/// `claim_from_onchain_tx` will be set.
+		///
+		/// If the channel which sent us the payment has been force-closed, we will claim the funds
+		/// via an on-chain transaction. In that case we do not yet know the on-chain transaction
+		/// fees which we will spend and will instead set this to `None`. It is possible duplicate
+		/// `PaymentForwarded` events are generated for the same payment iff `fee_earned_msat` is
+		/// `None`.
+		fee_earned_msat: Option<u64>,
+		/// If this is `true`, the forwarded HTLC was claimed by our counterparty via an on-chain
+		/// transaction.
+		claim_from_onchain_tx: bool,
+	},
 }

 impl Writeable for Event {
@@ -218,6 +239,13 @@ impl Writeable for Event {
 				(0, VecWriteWrapper(outputs), required),
 			});
 		},
+		&Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+			7u8.write(writer)?;
+			write_tlv_fields!(writer, {
+				(0, fee_earned_msat, option),
+				(2, claim_from_onchain_tx, required),
+			});
+		},
 	}
 	Ok(())
 }
@@ -313,6 +341,20 @@ impl MaybeReadable for Event {
 			};
 			f()
 		},
+		7u8 => {
+			let f = || {
+				let mut fee_earned_msat = None;
+				let mut claim_from_onchain_tx = false;
+				read_tlv_fields!(reader, {
+					(0, fee_earned_msat, option),
+					(2, claim_from_onchain_tx, required),
+				});
+				Ok(Some(Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx }))
+			};
+			f()
+		},
+		// Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
+		x if x % 2 == 1 => Ok(None),
 		_ => Err(msgs::DecodeError::InvalidValue)
 	}
}
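Taken together, a downstream consumer of the new event might look like the following sketch, with `event` assumed to come from `get_and_clear_pending_events()` and the logging purely illustrative:

	match event {
		Event::PaymentForwarded { fee_earned_msat: Some(fee), claim_from_onchain_tx } => {
			// Fee known; per the field docs it may read high if we force-closed
			// the downstream channel while the HTLC was pending.
			println!("forwarded a payment, earned {} msat (on-chain claim: {})", fee, claim_from_onchain_tx);
		},
		Event::PaymentForwarded { fee_earned_msat: None, .. } => {
			// Upstream channel was force-closed: on-chain fees are not yet
			// known, and per the docs this is the only case in which duplicate
			// events can be generated for the same payment.
			println!("forwarded a payment, fee not yet known");
		},
		_ => {},
	}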