From c09104f46ef5b44a1efb38669b95d21fa77d37ec Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 15 Jul 2021 21:56:42 +0000
Subject: [PATCH] Simplify call graph of get_update_fulfill_htlc since it can't Err.

---
 lightning/src/ln/channel.rs        | 113 +++++++++++++++++------
 lightning/src/ln/channelmanager.rs |  36 ++++-----
 2 files changed, 83 insertions(+), 66 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 6d9552577..f46d76042 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -301,6 +301,33 @@ pub struct CounterpartyForwardingInfo {
 	pub cltv_expiry_delta: u16,
 }
 
+/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
+/// description
+enum UpdateFulfillFetch {
+	NewClaim {
+		monitor_update: ChannelMonitorUpdate,
+		msg: Option<msgs::UpdateFulfillHTLC>,
+	},
+	DuplicateClaim {},
+}
+
+/// The return type of get_update_fulfill_htlc_and_commit.
+pub enum UpdateFulfillCommitFetch {
+	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
+	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
+	/// previously placed in the holding cell (and has since been removed).
+	NewClaim {
+		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
+		monitor_update: ChannelMonitorUpdate,
+		/// The update_fulfill message and commitment_signed message (if the claim was not placed
+		/// in the holding cell).
+		msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
+	},
+	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
+	/// or has been forgotten (presumably previously claimed).
+	DuplicateClaim {},
+}
+
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
@@ -1232,13 +1259,7 @@ impl Channel {
 		make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
 	}
 
-	/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-	/// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-	/// return Ok(_) if debug assertions are turned on or preconditions are met.
-	///
-	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-	/// but then have a reorg which settles on an HTLC-failure on chain.
-	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
+	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
 		// Either ChannelFunded got set (which means it won't be unset) or there is no way any
 		// caller thought we could have something claimed (cause we wouldn't have accepted in an
 		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
@@ -1266,7 +1287,7 @@ impl Channel {
 						log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
 						debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
 					}
-					return Ok((None, None));
+					return UpdateFulfillFetch::DuplicateClaim {};
 				},
 				_ => {
 					debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
@@ -1282,7 +1303,7 @@ impl Channel {
 			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
 			// this is simply a duplicate claim, not previously failed and we lost funds.
 			debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-			return Ok((None, None));
+			return UpdateFulfillFetch::DuplicateClaim {};
 		}
 
 		// Now update local state:
@@ -1306,7 +1327,7 @@ impl Channel {
 						self.latest_monitor_update_id -= 1;
 						#[cfg(any(test, feature = "fuzztarget"))]
 						debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-						return Ok((None, None));
+						return UpdateFulfillFetch::DuplicateClaim {};
 					}
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
@@ -1315,7 +1336,7 @@ impl Channel {
 						// TODO: We may actually be able to switch to a fulfill here, though its
 						// rare enough it may not be worth the complexity burden.
 						debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-						return Ok((None, Some(monitor_update)));
+						return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 					}
 				},
 				_ => {}
@@ -1327,7 +1348,7 @@ impl Channel {
 			});
 			#[cfg(any(test, feature = "fuzztarget"))]
 			self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
-			return Ok((None, Some(monitor_update)));
+			return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 		}
 		#[cfg(any(test, feature = "fuzztarget"))]
 		self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
@@ -1337,44 +1358,43 @@ impl Channel {
 			if let InboundHTLCState::Committed = htlc.state {
 			} else {
 				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-				return Ok((None, Some(monitor_update)));
+				return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
 			}
 			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
 			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
 		}
 
-		Ok((Some(msgs::UpdateFulfillHTLC {
-			channel_id: self.channel_id(),
-			htlc_id: htlc_id_arg,
-			payment_preimage: payment_preimage_arg,
-		}), Some(monitor_update)))
+		UpdateFulfillFetch::NewClaim {
+			monitor_update,
+			msg: Some(msgs::UpdateFulfillHTLC {
+				channel_id: self.channel_id(),
+				htlc_id: htlc_id_arg,
+				payment_preimage: payment_preimage_arg,
+			}),
+		}
 	}
 
-	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
-		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger)? {
-			(Some(update_fulfill_htlc), Some(mut monitor_update)) => {
+	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, ChannelError> where L::Target: Logger {
+		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+			UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
 				let (commitment, mut additional_update) = self.send_commitment_no_status_check(logger)?;
 				// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
 				// strictly increasing by one, so decrement it here.
 				self.latest_monitor_update_id = monitor_update.update_id;
 				monitor_update.updates.append(&mut additional_update.updates);
-				Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
-			},
-			(Some(update_fulfill_htlc), None) => {
-				let (commitment, monitor_update) = self.send_commitment_no_status_check(logger)?;
-				Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
+				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
 			},
-			(None, Some(monitor_update)) => Ok((None, Some(monitor_update))),
-			(None, None) => Ok((None, None))
+			UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+			UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
 		}
 	}
 
-	/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-	/// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-	/// return Ok(_) if debug assertions are turned on or preconditions are met.
-	///
-	/// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-	/// but then have a reorg which settles on an HTLC-failure on chain.
+	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+	/// before we fail backwards.
+	/// If we do fail twice, we debug_assert!(false) and return Ok(None). Thus, will always return
+	/// Ok(_) if debug assertions are turned on or preconditions are met.
 	pub fn get_update_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
 		if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
 			panic!("Was asked to fail an HTLC when channel was not in an operational state");
@@ -2468,20 +2488,17 @@ impl Channel {
 					}
 				},
 				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
-					match self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
-						Ok((update_fulfill_msg_option, additional_monitor_update_opt)) => {
-							update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
-							if let Some(mut additional_monitor_update) = additional_monitor_update_opt {
-								monitor_update.updates.append(&mut additional_monitor_update.updates);
-							}
-						},
-						Err(e) => {
-							if let ChannelError::Ignore(_) = e {}
-							else {
-								panic!("Got a non-IgnoreError action trying to fulfill holding cell HTLC");
-							}
-						}
-					}
+					// If an HTLC claim was previously added to the holding cell (via
+					// `get_update_fulfill_htlc`), then generating the claim message itself must
+					// not fail - any in-between attempts to claim the HTLC will have resulted
+					// in it hitting the holding cell again and we cannot change the state of a
+					// holding cell HTLC from fulfill to anything else.
+					let (update_fulfill_msg_option, mut additional_monitor_update) =
+						if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+							(msg, monitor_update)
+						} else { unreachable!() };
+					update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+					monitor_update.updates.append(&mut additional_monitor_update.updates);
 				},
 				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
 					match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 21d13eaef..679752380 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -44,7 +44,7 @@ use chain::transaction::{OutPoint, TransactionData};
 // construct one themselves.
 use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
@@ -2681,8 +2681,8 @@ impl ChannelMana
 		if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
 			let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
 			match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-				Ok((msgs, monitor_option)) => {
-					if let Some(monitor_update) = monitor_option {
+				Ok(msgs_monitor_option) => {
+					if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
 						if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
 							if was_frozen_for_monitor {
 								assert!(msgs.is_none());
@@ -2690,21 +2690,21 @@ impl ChannelMana
 								return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
 							}
 						}
-					}
-					if let Some((msg, commitment_signed)) = msgs {
-						log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-							log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-						channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-							node_id: chan.get().get_counterparty_node_id(),
-							updates: msgs::CommitmentUpdate {
-								update_add_htlcs: Vec::new(),
-								update_fulfill_htlcs: vec![msg],
-								update_fail_htlcs: Vec::new(),
-								update_fail_malformed_htlcs: Vec::new(),
-								update_fee: None,
-								commitment_signed,
-							}
-						});
+						if let Some((msg, commitment_signed)) = msgs {
+							log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+								log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+							channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+								node_id: chan.get().get_counterparty_node_id(),
+								updates: msgs::CommitmentUpdate {
+									update_add_htlcs: Vec::new(),
+									update_fulfill_htlcs: vec![msg],
+									update_fail_htlcs: Vec::new(),
+									update_fail_malformed_htlcs: Vec::new(),
+									update_fee: None,
+									commitment_signed,
+								}
+							});
+						}
 					}
 					return Ok(())
 				},
-- 
2.39.5
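For readers skimming the patch, the motivation for replacing the (Option<_>, Option<_>) tuple return
with an enum can be seen in the small self-contained sketch below. The types, the claim() helper, and
the println! handling are stand-ins invented for illustration, not LDK's actual definitions; the point
is only that an enum makes the duplicate-claim case explicit and makes a "message without a monitor
update" state unrepresentable, so callers can no longer silently drop a case.

// Sketch only: stand-in types mirroring the shape of UpdateFulfillCommitFetch in this patch.
struct MonitorUpdate(u64);
struct FulfillMsg { htlc_id: u64 }

enum UpdateFulfillCommitFetch {
    NewClaim { monitor_update: MonitorUpdate, msgs: Option<FulfillMsg> },
    DuplicateClaim {},
}

// Hypothetical claim helper standing in for get_update_fulfill_htlc_and_commit.
fn claim(htlc_id: u64, already_claimed: bool) -> UpdateFulfillCommitFetch {
    if already_claimed {
        UpdateFulfillCommitFetch::DuplicateClaim {}
    } else {
        UpdateFulfillCommitFetch::NewClaim {
            monitor_update: MonitorUpdate(1),
            msgs: Some(FulfillMsg { htlc_id }),
        }
    }
}

fn main() {
    // The compiler forces both variants to be handled, and every NewClaim carries a
    // monitor update, unlike the old (Option<Msg>, Option<Update>) tuple.
    match claim(42, false) {
        UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs } => {
            println!("persist monitor update {}", monitor_update.0);
            if let Some(msg) = msgs {
                println!("send update_fulfill for HTLC {}", msg.htlc_id);
            }
        },
        UpdateFulfillCommitFetch::DuplicateClaim {} => {
            // Already claimed (or sitting in the holding cell): nothing to send.
        },
    }
}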