Merge pull request #2063 from valentinewallace/2023-03-remove-paymentpathfailed-retry
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 5ae6f81a605ed5fa619aaeea158cb4b5c53c7edc..4aca2343f8038ea5fd64904ea4dd7e96c45d20fd 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -27,7 +27,7 @@ use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{DecodeError, OptionalField, DataLossProtect};
 use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -192,6 +192,7 @@ enum OutboundHTLCState {
 
 #[derive(Clone)]
 enum OutboundHTLCOutcome {
+       /// LDK version 0.0.105+ will always fill in the preimage here.
        Success(Option<PaymentPreimage>),
        Failure(HTLCFailReason),
 }
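
Note on the new doc line: since LDK 0.0.105 the preimage is always populated for a successful outbound HTLC, so `Success(None)` can only be left over from an HTLC that was resolved on 0.0.104 or earlier. A minimal sketch of how the two `Success` cases differ, using simplified stand-in types rather than LDK's real definitions:

    // Illustration only: simplified stand-ins, not LDK's real types.
    struct PaymentPreimage([u8; 32]);
    struct HTLCFailReason;

    enum OutboundHTLCOutcome {
        // 0.0.105+ always fills in the preimage; None can only survive from an HTLC
        // resolved while running 0.0.104 or earlier.
        Success(Option<PaymentPreimage>),
        Failure(HTLCFailReason),
    }

    fn describe(outcome: &OutboundHTLCOutcome) -> &'static str {
        match outcome {
            OutboundHTLCOutcome::Success(Some(_)) => "claimed, preimage available",
            OutboundHTLCOutcome::Success(None) => "claimed on a pre-0.0.105 version, preimage not retained",
            OutboundHTLCOutcome::Failure(_) => "failed",
        }
    }
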
@@ -2268,9 +2269,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
        pub fn funding_created<SP: Deref, L: Deref>(
                &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-       ) -> Result<(msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Option<msgs::ChannelReady>), ChannelError>
+       ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
        where
-               SP::Target: SignerProvider,
+               SP::Target: SignerProvider<Signer = Signer>,
                L::Target: Logger
        {
                if self.is_outbound() {
@@ -2346,10 +2347,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
                log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
 
+               let need_channel_ready = self.check_get_channel_ready(0).is_some();
+               self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+
                Ok((msgs::FundingSigned {
                        channel_id: self.channel_id,
                        signature
-               }, channel_monitor, self.check_get_channel_ready(0)))
+               }, channel_monitor))
        }
 
        /// Handles a funding_signed message from the remote end.
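
With this change `funding_created` no longer returns an `Option<msgs::ChannelReady>`: the channel pauses itself via `monitor_updating_paused`, and any `channel_ready` is regenerated by `monitor_updating_restored` once the new `ChannelMonitor` has been persisted. A hedged sketch of the call-site shape, mirroring the updated unit test further down (the real `ChannelManager` path additionally registers the monitor with `chain::Watch`):

    // Hedged sketch only; `msg`, `best_block`, `keys_provider` and `logger` are assumed
    // to be in scope exactly as in the test below.
    //
    // Before: an Option<msgs::ChannelReady> came back alongside the monitor.
    //     let (funding_signed, _monitor, _opt_channel_ready) =
    //         chan.funding_created(&msg, best_block, &&keys_provider, &&logger)?;
    //
    // After: only the message and the monitor are returned; a pending channel_ready is
    // queued internally and released by monitor_updating_restored().
    //     let (funding_signed, _monitor) =
    //         chan.funding_created(&msg, best_block, &&keys_provider, &&logger)?;
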
@@ -2480,6 +2484,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                        // If they haven't ever sent an updated point, the point they send should match
                                        // the current one.
                                        self.counterparty_cur_commitment_point
+                               } else if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
+                                       // If we've advanced the commitment number once, the second commitment point is
+                                       // at `counterparty_prev_commitment_point`, which is not yet revoked.
+                                       debug_assert!(self.counterparty_prev_commitment_point.is_some());
+                                       self.counterparty_prev_commitment_point
                                } else {
                                        // If they have sent updated points, channel_ready is always supposed to match
                                        // their "first" point, which we re-derive here.
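
The new branch handles the state where the counterparty's commitment number has advanced exactly once: the point a retransmitted `channel_ready` must match is then the one held in `counterparty_prev_commitment_point`, which has not yet been revoked. A rough sketch of the three-way selection, using simplified fields rather than the real `Channel` (and assuming LDK's `INITIAL_COMMITMENT_NUMBER = (1 << 48) - 1`):

    // Illustration only: simplified fields standing in for the real Channel state.
    const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

    struct PointState {
        cur_counterparty_commitment_transaction_number: u64,
        counterparty_cur_commitment_point: Option<[u8; 33]>,
        counterparty_prev_commitment_point: Option<[u8; 33]>,
    }

    impl PointState {
        // The per-commitment point we expect a (re-)sent channel_ready to carry.
        fn expected_channel_ready_point(&self) -> Option<[u8; 33]> {
            if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
                // No updated point received yet: channel_ready should match the current one.
                self.counterparty_cur_commitment_point
            } else if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
                // Advanced exactly once: the second commitment point, which channel_ready
                // carries, now sits in the "prev" slot and is not yet revoked.
                self.counterparty_prev_commitment_point
            } else {
                // Advanced further: LDK re-derives the "first" point from the revocation
                // secrets (omitted in this sketch).
                None
            }
        }
    }
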
@@ -3156,15 +3165,6 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        }
                }
 
-               self.latest_monitor_update_id += 1;
-               let mut monitor_update = ChannelMonitorUpdate {
-                       update_id: self.latest_monitor_update_id,
-                       updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
-                               commitment_tx: holder_commitment_tx,
-                               htlc_outputs: htlcs_and_sigs
-                       }]
-               };
-
                for htlc in self.pending_inbound_htlcs.iter_mut() {
                        let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
                                Some(forward_info.clone())
@@ -3176,6 +3176,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                need_commitment = true;
                        }
                }
+               let mut claimed_htlcs = Vec::new();
                for htlc in self.pending_outbound_htlcs.iter_mut() {
                        if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
                                log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
@@ -3183,11 +3184,30 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                // Grab the preimage, if it exists, instead of cloning
                                let mut reason = OutboundHTLCOutcome::Success(None);
                                mem::swap(outcome, &mut reason);
+                               if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
+                                       // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
+                                       // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
+                                       // have a `Success(None)` reason. In this case we could forget some HTLC
+                                       // claims, but such an upgrade is unlikely and including claimed HTLCs here
+                                       // fixes a bug which the user was exposed to on 0.0.104 when they started the
+                                       // claim anyway.
+                                       claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+                               }
                                htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
                                need_commitment = true;
                        }
                }
 
+               self.latest_monitor_update_id += 1;
+               let mut monitor_update = ChannelMonitorUpdate {
+                       update_id: self.latest_monitor_update_id,
+                       updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+                               commitment_tx: holder_commitment_tx,
+                               htlc_outputs: htlcs_and_sigs,
+                               claimed_htlcs,
+                       }]
+               };
+
                self.cur_holder_commitment_transaction_number -= 1;
                // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
                // build_commitment_no_status_check() next which will reset this to RAAFirst.
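
The structural change in these hunks is ordering: the `ChannelMonitorUpdate` is now built after the outbound-HTLC loop, so the preimages gathered into `claimed_htlcs` can be carried in `LatestHolderCommitmentTXInfo`, keyed by the new `SentHTLCId`. A simplified sketch of that collection step, with stand-in types rather than the real ones:

    // Illustration only: stand-in types for SentHTLCId / PaymentPreimage and the
    // outbound-HTLC loop; the real update also carries the commitment tx and HTLC sigs.
    #[derive(Clone, Copy)]
    struct SentHTLCId(u64);
    #[derive(Clone, Copy)]
    struct PaymentPreimage([u8; 32]);

    enum Outcome { Success(Option<PaymentPreimage>), Failure }

    fn collect_claimed(outbound: &[(SentHTLCId, Outcome)]) -> Vec<(SentHTLCId, PaymentPreimage)> {
        let mut claimed_htlcs = Vec::new();
        for (id, outcome) in outbound {
            // Only HTLCs whose preimage we actually learned are pushed into the monitor
            // update; a Success(None) left over from a pre-0.0.105 claim is skipped.
            if let Outcome::Success(Some(preimage)) = outcome {
                claimed_htlcs.push((*id, *preimage));
            }
        }
        claimed_htlcs
    }

In the real code the collected pairs go into `ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { claimed_htlcs, .. }`, which is why the update construction had to move below the loop.
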
@@ -3740,15 +3760,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
        }
 
        /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
-       /// This must be called immediately after the [`chain::Watch`] call which returned
-       /// [`ChannelMonitorUpdateStatus::InProgress`].
+       /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+       /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+       /// update completes (potentially immediately).
        /// The messages which were generated with the monitor update must *not* have been sent to the
        /// remote end, and must instead have been dropped. They will be regenerated when
        /// [`Self::monitor_updating_restored`] is called.
        ///
+       /// [`ChannelManager`]: super::channelmanager::ChannelManager
        /// [`chain::Watch`]: crate::chain::Watch
        /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
-       pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+       fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
                resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
                mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
                mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
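
The doc rewrite describes the new flow: the `Channel` marks itself paused and hands the `ChannelMonitorUpdate` back to the `ChannelManager`, which calls `monitor_updating_restored` once persistence completes, possibly immediately. A loose model of that sequencing, with placeholder types that only illustrate the idea (none of these are LDK signatures):

    // Illustration only: placeholder types modelling the pause/restore hand-off.
    enum UpdateStatus { Completed, InProgress }

    struct PausedChannel { paused: bool, held_back_msgs: Vec<String> }

    impl PausedChannel {
        fn monitor_updating_paused(&mut self, msgs: Vec<String>) {
            // Messages generated alongside the monitor update are dropped, not sent.
            self.paused = true;
            self.held_back_msgs = msgs;
        }
        fn monitor_updating_restored(&mut self) -> Vec<String> {
            // Once the update is persisted, the held-back messages are regenerated.
            self.paused = false;
            std::mem::take(&mut self.held_back_msgs)
        }
    }

    fn apply_update(chan: &mut PausedChannel, status: UpdateStatus, msgs: Vec<String>) -> Vec<String> {
        chan.monitor_updating_paused(msgs);
        match status {
            // Persistence finished synchronously: restore right away.
            UpdateStatus::Completed => chan.monitor_updating_restored(),
            // Otherwise the manager restores later, when persistence signals completion.
            UpdateStatus::InProgress => Vec::new(),
        }
    }
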
@@ -5960,9 +5982,16 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                        return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
                }
 
+               // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
+               // script is set, we just force-close and call it a day.
+               let mut chan_closed = false;
+               if self.channel_state < ChannelState::FundingSent as u32 {
+                       chan_closed = true;
+               }
+
                let update_shutdown_script = match self.shutdown_scriptpubkey {
                        Some(_) => false,
-                       None => {
+                       None if !chan_closed => {
                                let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
                                if !shutdown_scriptpubkey.is_compatible(their_features) {
                                        return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
@@ -5970,6 +5999,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
                                self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
                                true
                        },
+                       None => false,
                };
 
                // From here on out, we may not fail!
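
For a channel that never reached `FundingSent`, there is no point committing to a shutdown script, so the `update_shutdown_script` match now has three outcomes. A small sketch of just that decision, with the channel state collapsed into two booleans (hypothetical helper, not LDK code):

    // Hypothetical helper mirroring the three match arms above.
    fn needs_new_shutdown_script(script_already_set: bool, funding_sent: bool) -> bool {
        match (script_already_set, funding_sent) {
            // A script was negotiated earlier: reuse it, nothing new to fetch.
            (true, _) => false,
            // Funded channel with no script yet: ask the SignerProvider for one.
            (false, true) => true,
            // Channel was never funded: we just close it out, no script required.
            (false, false) => false,
        }
    }

In the diff the third case is the new `None => false` arm, with `chan_closed` capturing the pre-`FundingSent` check.
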
@@ -7088,7 +7118,6 @@ mod tests {
                                first_hop_htlc_msat: 548,
                                payment_id: PaymentId([42; 32]),
                                payment_secret: None,
-                               payment_params: None,
                        }
                });
 
@@ -7162,7 +7191,7 @@ mod tests {
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
-               let best_block = BestBlock::from_genesis(network);
+               let best_block = BestBlock::from_network(network);
                let chain_hash = best_block.block_hash();
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
 
@@ -7189,7 +7218,7 @@ mod tests {
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
-               let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
+               let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap();
 
                // Node B --> Node A: funding signed
                let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger);