- let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
- let commitment_tx_info = CommitmentTxInfoCached {
- fee,
- total_pending_htlcs,
- next_holder_htlc_id: match htlc.origin {
- HTLCInitiator::LocalOffered => self.context.next_holder_htlc_id + 1,
- HTLCInitiator::RemoteOffered => self.context.next_holder_htlc_id,
- },
- next_counterparty_htlc_id: match htlc.origin {
- HTLCInitiator::LocalOffered => self.context.next_counterparty_htlc_id,
- HTLCInitiator::RemoteOffered => self.context.next_counterparty_htlc_id + 1,
- },
- feerate: self.context.feerate_per_kw,
- };
- *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
- }
- res
- }
-
- /// Handles an inbound `update_add_htlc` message from the counterparty, validating it against
- /// our channel limits (amount bounds, max accepted HTLCs, max in-flight value, dust-exposure
- /// caps, channel reserve and commitment-fee affordability) and, on success, appending it to
- /// `pending_inbound_htlcs` in the `RemoteAnnounced` state.
- ///
- /// Hard protocol violations return `ChannelError::Close`. Softer conditions (dust-exposure
- /// limits, the fee spike buffer, our own shutdown being in progress) instead downgrade
- /// `pending_forward_status` via `create_pending_htlc_status` (codes 0x1000|7 / 0x4000|8), so
- /// the HTLC is still accepted here but will be failed back later.
- pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
- where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
- // We can't accept HTLCs sent after we've sent a shutdown.
- // Bitmask idiom: the masked state differs from plain `ChannelReady` when either the
- // channel is not yet ready or the relevant shutdown bit is set.
- let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if local_sent_shutdown {
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
- }
- // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
- let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if remote_sent_shutdown {
- return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
- }
- if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
- return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
- }
- if msg.amount_msat == 0 {
- return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
- }
- if msg.amount_msat < self.context.holder_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
- }
-
- let inbound_stats = self.get_inbound_pending_htlc_stats(None);
- let outbound_stats = self.get_outbound_pending_htlc_stats(None);
- if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
- return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
- }
- if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
- }
- // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
- // the reserve_satoshis we told them to always have as direct payment so that they lose
- // something if we punish them for broadcasting an old state).
- // Note that we don't really care about having a small/no to_remote output in our local
- // commitment transactions, as the purpose of the channel reserve is to ensure we can
- // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
- // present in the next commitment transaction we send them (at least for fulfilled ones,
- // failed ones won't modify value_to_self).
- // Note that we will send HTLCs which another instance of rust-lightning would think
- // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
- // Channel state once they will not be present in the next received commitment
- // transaction).
- let mut removed_outbound_total_msat = 0;
- for ref htlc in self.context.pending_outbound_htlcs.iter() {
- if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
- removed_outbound_total_msat += htlc.amount_msat;
- } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
- removed_outbound_total_msat += htlc.amount_msat;
- }
- }
-
- // Anchor-output channels apply no feerate-based dust buffer (the exposure limits below
- // fall back to the raw dust limits); otherwise size the buffer from the (presumably
- // headroom-adjusted) `get_dust_buffer_feerate` so near-dust HTLCs are treated
- // conservatively — TODO(review) confirm the buffer-feerate semantics against its definition.
- let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
- (0, 0)
- } else {
- let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
- (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
- dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
- };
- let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
- if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
- let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
- if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
- log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
- }
-
- let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
- if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
- let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
- if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
- log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
- on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
- }
-
- // NOTE(review): pending inbound HTLCs are counted toward our balance here (the worst case
- // for the remote), so `pending_remote_value_msat` is the least the counterparty could be
- // left with — TODO confirm intent.
- let pending_value_to_self_msat =
- self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
- let pending_remote_value_msat =
- self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
- if pending_remote_value_msat < msg.amount_msat {
- return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
- }
-
- // Check that the remote can afford to pay for this HTLC on-chain at the current
- // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
- let remote_commit_tx_fee_msat = if self.is_outbound() { 0 } else {
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
- };
- if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
- return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
- };
-
- if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
- return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
- }
-
- if !self.is_outbound() {
- // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
- // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
- // receiver's side, only on the sender's.
- // Note that when we eventually remove support for fee updates and switch to anchor output
- // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
- // the extra htlc when calculating the next remote commitment transaction fee as we should
- // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
- // sensitive to fee spikes.
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
- if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
- // Note that if the pending_forward_status is not updated here, then it's because we're already failing
- // the HTLC, i.e. its status is already set to failing.
- log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id()));
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
- } else {
- // Check that they won't violate our local required channel reserve by adding this HTLC.
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(htlc_candidate, None);
- if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
- return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
- }
- }
- // HTLC IDs must be assigned strictly sequentially by the counterparty.
- if self.context.next_counterparty_htlc_id != msg.htlc_id {
- return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
- }
- // BOLT 2: a cltv_expiry of 500_000_000 or above would be interpreted as a UNIX timestamp
- // rather than a block height, and must be rejected.
- if msg.cltv_expiry >= 500000000 {
- return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
- }
-
- // After we've sent shutdown we only accept HTLCs we will fail back; a forwardable status
- // reaching this point indicates a ChannelManager logic error (hence the panic below).
- if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
- if let PendingHTLCStatus::Forward(_) = pending_forward_status {
- panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
- }
- }
-
- // Now update local state:
- self.context.next_counterparty_htlc_id += 1;
- self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
- htlc_id: msg.htlc_id,
- amount_msat: msg.amount_msat,
- payment_hash: msg.payment_hash,
- cltv_expiry: msg.cltv_expiry,
- state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
- });
- Ok(())
- }
-
- /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
- ///
- /// `check_preimage` is `Some` for a fulfill (and is verified against the HTLC's stored
- /// payment hash before being trusted); `fail_reason` is `Some` for a fail. The two are
- /// mutually exclusive (asserted below). Returns a reference to the updated HTLC, or a
- /// `ChannelError::Close` if the HTLC is unknown, the preimage is wrong, or the HTLC is in a
- /// state that may not be removed.
- #[inline]
- fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
- assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if htlc.htlc_id == htlc_id {
- // Build the removal outcome: verify a claimed preimage hashes to the stored
- // payment hash, or carry the failure reason through.
- let outcome = match check_preimage {
- None => fail_reason.into(),
- Some(payment_preimage) => {
- let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
- if payment_hash != htlc.payment_hash {
- return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
- }
- OutboundHTLCOutcome::Success(Some(payment_preimage))
- }
- };
- // Only a `Committed` HTLC may transition to `RemoteRemoved`; removing an HTLC the
- // remote hasn't yet committed to, or one they already removed, is a protocol
- // violation.
- match htlc.state {
- OutboundHTLCState::LocalAnnounced(_) =>
- return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
- OutboundHTLCState::Committed => {
- htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
- },
- OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
- return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
- }
- return Ok(htlc);
- }
- }
- Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
- }
-
- /// Handles an inbound `update_fulfill_htlc` message, marking the matching outbound HTLC as
- /// `RemoteRemoved` after verifying the supplied preimage (see `mark_outbound_htlc_removed`).
- /// Returns the HTLC's source and amount (msat) so the claim can be propagated upstream.
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
- }
-
- self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
- }
-
- /// Handles an inbound `update_fail_htlc` message, marking the matching outbound HTLC as
- /// `RemoteRemoved` with the given failure reason (see `mark_outbound_htlc_removed`).
- pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
- }
-
- self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
- Ok(())
- }
-
- /// Handles an inbound `update_fail_malformed_htlc` message, marking the matching outbound
- /// HTLC as `RemoteRemoved` with the given failure reason (see `mark_outbound_htlc_removed`).
- pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
- }
-
- self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
- Ok(())
- }
-
- pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
- where L::Target: Logger
- {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
- }
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
- return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
- }
-
- let funding_script = self.get_funding_redeemscript();
-
- let keys = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
-
- let commitment_stats = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
- let commitment_txid = {
- let trusted_tx = commitment_stats.tx.trust();
- let bitcoin_tx = trusted_tx.built_transaction();
- let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
-
- log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
- log_bytes!(msg.signature.serialize_compact()[..]),
- log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
- log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
- return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
- }
- bitcoin_tx.txid
- };
- let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
-
- // If our counterparty updated the channel fee in this commitment transaction, check that
- // they can actually afford the new fee now.
- let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
- update_state == FeeUpdateState::RemoteAnnounced
- } else { false };
- if update_fee {
- debug_assert!(!self.is_outbound());
- let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
- if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
- return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
- }
- }
- #[cfg(any(test, fuzzing))]
- {
- if self.is_outbound() {
- let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
- *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
- if let Some(info) = projected_commit_tx_info {
- let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
- + self.context.holding_cell_htlc_updates.len();
- if info.total_pending_htlcs == total_pending_htlcs
- && info.next_holder_htlc_id == self.context.next_holder_htlc_id
- && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
- && info.feerate == self.context.feerate_per_kw {
- assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
- }
- }
- }
- }
-
- if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
- return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
- }
-
- // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
- // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
- // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
- // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
- // backwards compatibility, we never use it in production. To provide test coverage, here,
- // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
- #[allow(unused_assignments, unused_mut)]
- let mut separate_nondust_htlc_sources = false;
- #[cfg(all(feature = "std", any(test, fuzzing)))] {
- use core::hash::{BuildHasher, Hasher};
- // Get a random value using the only std API to do so - the DefaultHasher
- let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
- separate_nondust_htlc_sources = rand_val % 2 == 0;
- }
-
- let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
- let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
- for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
- if let Some(_) = htlc.transaction_output_index {
- let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
- self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
- false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
-
- let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
- let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
- let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
- log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
- log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
- encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
- return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
- }
- if !separate_nondust_htlc_sources {
- htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
- }
- } else {
- htlcs_and_sigs.push((htlc, None, source_opt.take()));
- }
- if separate_nondust_htlc_sources {
- if let Some(source) = source_opt.take() {
- nondust_htlc_sources.push(source);
- }
- }
- debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
- }
-
- let holder_commitment_tx = HolderCommitmentTransaction::new(
- commitment_stats.tx,
- msg.signature,
- msg.htlc_signatures.clone(),
- &self.get_holder_pubkeys().funding_pubkey,
- self.counterparty_funding_pubkey()
- );
-
- self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
-
- // Update state now that we've passed all the can-fail calls...
- let mut need_commitment = false;
- if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
- if *update_state == FeeUpdateState::RemoteAnnounced {
- *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
- need_commitment = true;
- }
- }
-
- for htlc in self.context.pending_inbound_htlcs.iter_mut() {
- let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
- Some(forward_info.clone())
- } else { None };
- if let Some(forward_info) = new_forward {
- log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
- log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
- htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
- need_commitment = true;
- }
- }
- let mut claimed_htlcs = Vec::new();
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
- log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
- log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
- // Grab the preimage, if it exists, instead of cloning
- let mut reason = OutboundHTLCOutcome::Success(None);
- mem::swap(outcome, &mut reason);
- if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
- // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
- // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
- // have a `Success(None)` reason. In this case we could forget some HTLC
- // claims, but such an upgrade is unlikely and including claimed HTLCs here
- // fixes a bug which the user was exposed to on 0.0.104 when they started the
- // claim anyway.
- claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
- }
- htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
- need_commitment = true;
- }
- }
-
- self.context.latest_monitor_update_id += 1;
- let mut monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
- commitment_tx: holder_commitment_tx,
- htlc_outputs: htlcs_and_sigs,
- claimed_htlcs,
- nondust_htlc_sources,
- }]
- };
-
- self.context.cur_holder_commitment_transaction_number -= 1;
- // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
- // build_commitment_no_status_check() next which will reset this to RAAFirst.
- self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
-
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
- // In case we initially failed monitor updating without requiring a response, we need
- // to make sure the RAA gets sent first.
- self.context.monitor_pending_revoke_and_ack = true;
- if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
- // If we were going to send a commitment_signed after the RAA, go ahead and do all
- // the corresponding HTLC status updates so that get_last_commitment_update
- // includes the right HTLCs.
- self.context.monitor_pending_commitment_signed = true;
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- }
- log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
- log_bytes!(self.context.channel_id));
- return Ok(self.push_ret_blockable_mon_update(monitor_update));