Simplify call graph of get_update_fulfill_htlc since it can't Err.
author    Matt Corallo <git@bluematt.me>  Thu, 15 Jul 2021 21:56:42 +0000 (21:56 +0000)
committer Matt Corallo <git@bluematt.me>  Wed, 28 Jul 2021 00:34:53 +0000 (00:34 +0000)
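
get_update_fulfill_htlc now returns an UpdateFulfillFetch directly (it cannot fail), and
get_update_fulfill_htlc_and_commit returns Result<UpdateFulfillCommitFetch, ChannelError>,
so callers match on the states that can actually occur rather than on tuple-of-Options
combinations that are impossible in practice. A rough, self-contained sketch of the shape of
the change follows; the types below are placeholder stand-ins, not the real lightning structs
or API:

    // Placeholder stand-ins for the real ChannelMonitorUpdate / message structs.
    struct ChannelMonitorUpdate { update_id: u64 }
    struct UpdateFulfillHTLC { htlc_id: u64 }
    struct CommitmentSigned;

    // New-style return type: the variants name the only states a claim can be in.
    enum UpdateFulfillCommitFetch {
        NewClaim {
            monitor_update: ChannelMonitorUpdate,
            msgs: Option<(UpdateFulfillHTLC, CommitmentSigned)>,
        },
        DuplicateClaim {},
    }

    // Caller side: one match replaces nested Option destructuring plus a dead Err arm.
    fn handle_claim(fetch: UpdateFulfillCommitFetch) {
        match fetch {
            UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((fulfill, _commitment)) } => {
                // Persist the monitor update, then send update_fulfill_htlc and
                // commitment_signed to the counterparty.
                println!("new claim for HTLC {} (monitor update {})", fulfill.htlc_id, monitor_update.update_id);
            },
            UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None } => {
                // The claim went into the holding cell; only the monitor update matters now.
                println!("holding-cell claim (monitor update {})", monitor_update.update_id);
            },
            UpdateFulfillCommitFetch::DuplicateClaim {} => {
                // Already claimed (or already queued in the holding cell); nothing to do.
            },
        }
    }

    fn main() {
        handle_claim(UpdateFulfillCommitFetch::NewClaim {
            monitor_update: ChannelMonitorUpdate { update_id: 1 },
            msgs: Some((UpdateFulfillHTLC { htlc_id: 0 }, CommitmentSigned)),
        });
        handle_claim(UpdateFulfillCommitFetch::DuplicateClaim {});
    }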
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
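
The free_holding_cell_htlcs() hunk in channel.rs below likewise drops the per-claim error
handling in favour of an unreachable!(). Here is a toy model of why that branch cannot be hit,
resting on the assumption spelled out in the new comment there (a claim already sitting in the
holding cell always regenerates its update_fulfill message and can never turn into a duplicate
or a fail); again, placeholder types rather than the real ones:

    struct ChannelMonitorUpdate;
    struct UpdateFulfillHTLC { htlc_id: u64 }

    enum UpdateFulfillFetch {
        NewClaim { monitor_update: ChannelMonitorUpdate, msg: Option<UpdateFulfillHTLC> },
        DuplicateClaim {},
    }

    // A claim pulled back out of the holding cell always regenerates its message, so
    // both the expect() and the else-branch mirror invariants rather than reachable
    // error paths.
    fn free_holding_cell_claim(fetch: UpdateFulfillFetch) -> (UpdateFulfillHTLC, ChannelMonitorUpdate) {
        if let UpdateFulfillFetch::NewClaim { monitor_update, msg } = fetch {
            (msg.expect("holding-cell claims always carry a regenerated message"), monitor_update)
        } else {
            // A duplicate would have been caught when the HTLC first entered the holding cell.
            unreachable!()
        }
    }

    fn main() {
        let fetch = UpdateFulfillFetch::NewClaim {
            monitor_update: ChannelMonitorUpdate,
            msg: Some(UpdateFulfillHTLC { htlc_id: 0 }),
        };
        let (fulfill, _monitor_update) = free_holding_cell_claim(fetch);
        assert_eq!(fulfill.htlc_id, 0);
    }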

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 6d95525773445363361f5c6b5ba0d9f5da499d9f..f46d76042df91601e4bc278425906e40bf075e37 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -301,6 +301,33 @@ pub struct CounterpartyForwardingInfo {
        pub cltv_expiry_delta: u16,
 }
 
+/// A return value enum for get_update_fulfill_htlc. See the corresponding
+/// UpdateFulfillCommitFetch variants for descriptions.
+enum UpdateFulfillFetch {
+       NewClaim {
+               monitor_update: ChannelMonitorUpdate,
+               msg: Option<msgs::UpdateFulfillHTLC>,
+       },
+       DuplicateClaim {},
+}
+
+/// The return type of get_update_fulfill_htlc_and_commit.
+pub enum UpdateFulfillCommitFetch {
+       /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
+       /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
+       /// previously placed in the holding cell (and has since been removed).
+       NewClaim {
+               /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
+               monitor_update: ChannelMonitorUpdate,
+               /// The update_fulfill message and commitment_signed message (if the claim was not placed
+               /// in the holding cell).
+               msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
+       },
+       /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
+       /// or has been forgotten (presumably previously claimed).
+       DuplicateClaim {},
+}
+
 // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
 // has been completed, and then turn into a Channel to get compiler-time enforcement of things like
 // calling channel_id() before we're set up or things like get_outbound_funding_signed on an
@@ -1232,13 +1259,7 @@ impl<Signer: Sign> Channel<Signer> {
                make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
        }
 
-       /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-       /// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-       /// return Ok(_) if debug assertions are turned on or preconditions are met.
-       ///
-       /// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-       /// but then have a reorg which settles on an HTLC-failure on chain.
-       fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> Result<(Option<msgs::UpdateFulfillHTLC>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
+       fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
                // Either ChannelFunded got set (which means it won't be unset) or there is no way any
                // caller thought we could have something claimed (cause we wouldn't have accepted in an
                // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
@@ -1266,7 +1287,7 @@ impl<Signer: Sign> Channel<Signer> {
                                                        log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
                                                }
-                                               return Ok((None, None));
+                                               return UpdateFulfillFetch::DuplicateClaim {};
                                        },
                                        _ => {
                                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
@@ -1282,7 +1303,7 @@ impl<Signer: Sign> Channel<Signer> {
                        // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
                        // this is simply a duplicate claim, not previously failed and we lost funds.
                        debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-                       return Ok((None, None));
+                       return UpdateFulfillFetch::DuplicateClaim {};
                }
 
                // Now update local state:
@@ -1306,7 +1327,7 @@ impl<Signer: Sign> Channel<Signer> {
                                                        self.latest_monitor_update_id -= 1;
                                                        #[cfg(any(test, feature = "fuzztarget"))]
                                                        debug_assert!(self.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
-                                                       return Ok((None, None));
+                                                       return UpdateFulfillFetch::DuplicateClaim {};
                                                }
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
@@ -1315,7 +1336,7 @@ impl<Signer: Sign> Channel<Signer> {
                                                        // TODO: We may actually be able to switch to a fulfill here, though its
                                                        // rare enough it may not be worth the complexity burden.
                                                        debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
-                                                       return Ok((None, Some(monitor_update)));
+                                                       return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
                                                }
                                        },
                                        _ => {}
@@ -1327,7 +1348,7 @@ impl<Signer: Sign> Channel<Signer> {
                        });
                        #[cfg(any(test, feature = "fuzztarget"))]
                        self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
-                       return Ok((None, Some(monitor_update)));
+                       return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
                }
                #[cfg(any(test, feature = "fuzztarget"))]
                self.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
@@ -1337,44 +1358,43 @@ impl<Signer: Sign> Channel<Signer> {
                        if let InboundHTLCState::Committed = htlc.state {
                        } else {
                                debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
-                               return Ok((None, Some(monitor_update)));
+                               return UpdateFulfillFetch::NewClaim { monitor_update, msg: None };
                        }
                        log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id));
                        htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
                }
 
-               Ok((Some(msgs::UpdateFulfillHTLC {
-                       channel_id: self.channel_id(),
-                       htlc_id: htlc_id_arg,
-                       payment_preimage: payment_preimage_arg,
-               }), Some(monitor_update)))
+               UpdateFulfillFetch::NewClaim {
+                       monitor_update,
+                       msg: Some(msgs::UpdateFulfillHTLC {
+                               channel_id: self.channel_id(),
+                               htlc_id: htlc_id_arg,
+                               payment_preimage: payment_preimage_arg,
+                       }),
+               }
        }
 
-       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<(Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>, Option<ChannelMonitorUpdate>), ChannelError> where L::Target: Logger {
-               match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger)? {
-                       (Some(update_fulfill_htlc), Some(mut monitor_update)) => {
+       pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, ChannelError> where L::Target: Logger {
+               match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+                       UpdateFulfillFetch::NewClaim { mut monitor_update, msg: Some(update_fulfill_htlc) } => {
                                let (commitment, mut additional_update) = self.send_commitment_no_status_check(logger)?;
                                // send_commitment_no_status_check may bump latest_monitor_id but we want them to be
                                // strictly increasing by one, so decrement it here.
                                self.latest_monitor_update_id = monitor_update.update_id;
                                monitor_update.updates.append(&mut additional_update.updates);
-                               Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
-                       },
-                       (Some(update_fulfill_htlc), None) => {
-                               let (commitment, monitor_update) = self.send_commitment_no_status_check(logger)?;
-                               Ok((Some((update_fulfill_htlc, commitment)), Some(monitor_update)))
+                               Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: Some((update_fulfill_htlc, commitment)) })
                        },
-                       (None, Some(monitor_update)) => Ok((None, Some(monitor_update))),
-                       (None, None) => Ok((None, None))
+                       UpdateFulfillFetch::NewClaim { monitor_update, msg: None } => Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, msgs: None }),
+                       UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
                }
        }
 
-       /// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
-       /// In such cases we debug_assert!(false) and return a ChannelError::Ignore. Thus, will always
-       /// return Ok(_) if debug assertions are turned on or preconditions are met.
-       ///
-       /// Note that it is still possible to hit these assertions in case we find a preimage on-chain
-       /// but then have a reorg which settles on an HTLC-failure on chain.
+       /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+       /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+       /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+       /// before we fail backwards.
+       /// If we do fail twice, we debug_assert!(false) and return Ok(None). Thus, this will always
+       /// return Ok(_) if debug assertions are turned on or preconditions are met.
        pub fn get_update_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
                if (self.channel_state & (ChannelState::ChannelFunded as u32)) != (ChannelState::ChannelFunded as u32) {
                        panic!("Was asked to fail an HTLC when channel was not in an operational state");
@@ -2468,20 +2488,17 @@ impl<Signer: Sign> Channel<Signer> {
                                                }
                                        },
                                        &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
-                                               match self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
-                                                       Ok((update_fulfill_msg_option, additional_monitor_update_opt)) => {
-                                                               update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
-                                                               if let Some(mut additional_monitor_update) = additional_monitor_update_opt {
-                                                                       monitor_update.updates.append(&mut additional_monitor_update.updates);
-                                                               }
-                                                       },
-                                                       Err(e) => {
-                                                               if let ChannelError::Ignore(_) = e {}
-                                                               else {
-                                                                       panic!("Got a non-IgnoreError action trying to fulfill holding cell HTLC");
-                                                               }
-                                                       }
-                                               }
+                                               // If an HTLC claim was previously added to the holding cell (via
+                                               // `get_update_fulfill_htlc`), then generating the claim message itself must
+                                               // not fail - any in-between attempts to claim the HTLC will have resulted
+                                               // in it hitting the holding cell again and we cannot change the state of a
+                                               // holding cell HTLC from fulfill to anything else.
+                                               let (update_fulfill_msg_option, mut additional_monitor_update) =
+                                                       if let UpdateFulfillFetch::NewClaim { msg, monitor_update } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+                                                               (msg, monitor_update)
+                                                       } else { unreachable!() };
+                                               update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+                                               monitor_update.updates.append(&mut additional_monitor_update.updates);
                                        },
                                        &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
                                                match self.get_update_fail_htlc(htlc_id, err_packet.clone(), logger) {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 21d13eaefc2d350c81c32ad3c246c4f3823e4c15..67975238050472790da5bbc344d1f0bb67fda2a4 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -44,7 +44,7 @@ use chain::transaction::{OutPoint, TransactionData};
 // construct one themselves.
 use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
 pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
 use ln::msgs;
@@ -2681,8 +2681,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
                        let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
                        match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
-                               Ok((msgs, monitor_option)) => {
-                                       if let Some(monitor_update) = monitor_option {
+                               Ok(msgs_monitor_option) => {
+                                       if let UpdateFulfillCommitFetch::NewClaim { msgs, monitor_update } = msgs_monitor_option {
                                                if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
                                                        if was_frozen_for_monitor {
                                                                assert!(msgs.is_none());
@@ -2690,21 +2690,21 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                return Err(Some((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err())));
                                                        }
                                                }
-                                       }
-                                       if let Some((msg, commitment_signed)) = msgs {
-                                               log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
-                                                       log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
-                                               channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-                                                       node_id: chan.get().get_counterparty_node_id(),
-                                                       updates: msgs::CommitmentUpdate {
-                                                               update_add_htlcs: Vec::new(),
-                                                               update_fulfill_htlcs: vec![msg],
-                                                               update_fail_htlcs: Vec::new(),
-                                                               update_fail_malformed_htlcs: Vec::new(),
-                                                               update_fee: None,
-                                                               commitment_signed,
-                                                       }
-                                               });
+                                               if let Some((msg, commitment_signed)) = msgs {
+                                                       log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+                                                               log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+                                                       channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+                                                               node_id: chan.get().get_counterparty_node_id(),
+                                                               updates: msgs::CommitmentUpdate {
+                                                                       update_add_htlcs: Vec::new(),
+                                                                       update_fulfill_htlcs: vec![msg],
+                                                                       update_fail_htlcs: Vec::new(),
+                                                                       update_fail_malformed_htlcs: Vec::new(),
+                                                                       update_fee: None,
+                                                                       commitment_signed,
+                                                               }
+                                                       });
+                                               }
                                        }
                                        return Ok(())
                                },