+ let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
+ (0, 0)
+ } else {
+ let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+ (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
+ dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
+ };
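+ // If this HTLC would be trimmed to dust on the counterparty's commitment transaction, make
+ // sure accepting it keeps our total dust exposure there under the configured maximum. Note
+ // that we don't reject the HTLC outright; we accept it but mark it to be failed back with
+ // temporary_channel_failure (0x1000|7) once committed.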
+ let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+ let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+ if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+ on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ }
+
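+ // Perform the same dust exposure check against our own commitment transaction, this time
+ // using the HTLC-success transaction weight and our own dust limit, since we would claim an
+ // inbound HTLC on our commitment via an HTLC-success transaction.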
+ let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+ let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+ if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+ on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ }
+
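+ // Compute how much of the channel's value is currently attributable to the counterparty
+ // (counting pending HTLCs conservatively against them) and reject the HTLC outright if they
+ // cannot cover its value at all.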
+ let pending_value_to_self_msat =
+ self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+ let pending_remote_value_msat =
+ self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+ if pending_remote_value_msat < msg.amount_msat {
+ return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
+ }
+
+ // Check that the remote can afford to pay for this HTLC on-chain at the current
+ // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
+ let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
+ };
+ if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
+ return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
+ }
+
+ if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
+ return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+ }
+
+ if !self.context.is_outbound() {
+ // The `2 *` and `Some(())` are for the fee spike buffer we keep for the remote. This deviates
+ // from the spec because the spec only imposes the fee spike buffer requirement on the
+ // sender's side, not the receiver's.
+ // Note that when we eventually remove support for fee updates and switch to anchor output
+ // fees, we will drop the `2 *`, since we will no longer be as sensitive to fee spikes. But we
+ // keep the extra HTLC when calculating the next remote commitment transaction fee, as we
+ // should still be able to afford adding this HTLC plus one more future HTLC, regardless of
+ // being sensitive to fee spikes.
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+ if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
+ // Note that if the pending_forward_status is not updated here, then it's because we're already failing
+ // the HTLC, i.e. its status is already set to failing.
+ log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ } else {
+ // Check that they won't violate our local required channel reserve by adding this HTLC.
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
+ return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
+ }
+ }
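+ // Per BOLT 2, HTLC IDs must be assigned sequentially starting from 0, and a cltv_expiry at
+ // or above 500000000 would be interpreted as a UNIX timestamp rather than a block height.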
+ if self.context.next_counterparty_htlc_id != msg.htlc_id {
+ return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
+ }
+ if msg.cltv_expiry >= 500000000 {
+ return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
+ }
+
+ if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
+ if let PendingHTLCStatus::Forward(_) = pending_forward_status {
+ panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
+ }
+ }
+
+ // Now update local state:
+ self.context.next_counterparty_htlc_id += 1;
+ self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
+ htlc_id: msg.htlc_id,
+ amount_msat: msg.amount_msat,
+ payment_hash: msg.payment_hash,
+ cltv_expiry: msg.cltv_expiry,
+ state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
+ });
+ Ok(())
+ }
+
+ /// Marks an outbound HTLC as removed once we have received an update_fail/fulfill/malformed message for it from the remote.
+ #[inline]
+ fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
+ assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if htlc.htlc_id == htlc_id {
+ let outcome = match check_preimage {
+ None => fail_reason.into(),
+ Some(payment_preimage) => {
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+ if payment_hash != htlc.payment_hash {
+ return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
+ }
+ OutboundHTLCOutcome::Success(Some(payment_preimage))
+ }
+ };
+ match htlc.state {
+ OutboundHTLCState::LocalAnnounced(_) =>
+ return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
+ OutboundHTLCState::Committed => {
+ htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
+ },
+ OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
+ return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+ }
+ return Ok(htlc);
+ }
+ }
+ Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
+ }
+
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
+ }
+
+ self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+ }
+
+ pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+ }
+
+ self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
+ Ok(())
+ }
+
+ pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+ }
+
+ self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
+ Ok(())
+ }
+
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
+ where L::Target: Logger
+ {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+ }
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
+ }
+
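+ // Rebuild our view of the new holder commitment transaction and check the counterparty's
+ // signature on it against the 2-of-2 funding redeemscript.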
+ let funding_script = self.context.get_funding_redeemscript();
+
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
+ let commitment_txid = {
+ let trusted_tx = commitment_stats.tx.trust();
+ let bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+
+ log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
+ log_bytes!(msg.signature.serialize_compact()[..]),
+ log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
+ log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
+ return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
+ }
+ bitcoin_tx.txid
+ };
+ let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
+
+ // If our counterparty updated the channel fee in this commitment transaction, check that
+ // they can actually afford the new fee now.
+ let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
+ update_state == FeeUpdateState::RemoteAnnounced
+ } else { false };
+ if update_fee {
+ debug_assert!(!self.context.is_outbound());
+ let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
+ if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
+ return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
+ }
+ }
+ #[cfg(any(test, fuzzing))]
+ {
+ if self.context.is_outbound() {
+ let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
+ *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ if let Some(info) = projected_commit_tx_info {
+ let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
+ + self.context.holding_cell_htlc_updates.len();
+ if info.total_pending_htlcs == total_pending_htlcs
+ && info.next_holder_htlc_id == self.context.next_holder_htlc_id
+ && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
+ && info.feerate == self.context.feerate_per_kw {
+ assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
+ }
+ }
+ }
+ }
+
+ if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
+ return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
+ }
+
+ // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
+ // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
+ // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
+ // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added; however, for
+ // backwards compatibility, we never use it in production. To provide test coverage, we
+ // randomly decide here (in test/fuzzing builds) to use the new vec sometimes.
+ #[allow(unused_assignments, unused_mut)]
+ let mut separate_nondust_htlc_sources = false;
+ #[cfg(all(feature = "std", any(test, fuzzing)))] {
+ use core::hash::{BuildHasher, Hasher};
+ // Get a random value using the only std API to do so - the DefaultHasher
+ let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
+ separate_nondust_htlc_sources = rand_val % 2 == 0;
+ }
+
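+ // Check the counterparty's signature on each non-dust HTLC's second-stage transaction.
+ // Dust HTLCs (those without a transaction_output_index) have no such transaction and thus
+ // no signature to verify.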
+ let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
+ let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
+ for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
+ if let Some(_) = htlc.transaction_output_index {
+ let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
+ self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
+ false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+
+ let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
+ let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+ let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
+ log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
+ log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
+ encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
+ return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
+ }
+ if !separate_nondust_htlc_sources {
+ htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
+ }
+ } else {
+ htlcs_and_sigs.push((htlc, None, source_opt.take()));
+ }
+ if separate_nondust_htlc_sources {
+ if let Some(source) = source_opt.take() {
+ nondust_htlc_sources.push(source);
+ }
+ }
+ debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
+ }
+
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ commitment_stats.tx,
+ msg.signature,
+ msg.htlc_signatures.clone(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
+
+ self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
+ .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+
+ // Update state now that we've passed all the can-fail calls...
+ let mut need_commitment = false;
+ if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
+ if *update_state == FeeUpdateState::RemoteAnnounced {
+ *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
+ need_commitment = true;
+ }
+ }
+
+ for htlc in self.context.pending_inbound_htlcs.iter_mut() {
+ let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
+ Some(forward_info.clone())
+ } else { None };
+ if let Some(forward_info) = new_forward {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+ htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
+ need_commitment = true;
+ }
+ }
+ let mut claimed_htlcs = Vec::new();
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
+ // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
+ // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
+ // have a `Success(None)` reason. In this case we could forget some HTLC
+ // claims, but such an upgrade is unlikely and including claimed HTLCs here
+ // fixes a bug which the user was exposed to on 0.0.104 when they started the
+ // claim anyway.
+ claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+ }
+ htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
+ need_commitment = true;
+ }
+ }
+
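+ // Build a monitor update carrying the new holder commitment transaction along with the HTLC
+ // signatures, sources, and claimed preimages needed to enforce it on-chain, then advance our
+ // commitment number.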
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+ commitment_tx: holder_commitment_tx,
+ htlc_outputs: htlcs_and_sigs,
+ claimed_htlcs,
+ nondust_htlc_sources,
+ }]
+ };
+
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
+ // build_commitment_no_status_check() next which will reset this to RAAFirst.
+ self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
+
+ if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ // In case we initially failed monitor updating without requiring a response, we need
+ // to make sure the RAA gets sent first.
+ self.context.monitor_pending_revoke_and_ack = true;
+ if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we were going to send a commitment_signed after the RAA, go ahead and do all
+ // the corresponding HTLC status updates so that get_last_commitment_update
+ // includes the right HTLCs.
+ self.context.monitor_pending_commitment_signed = true;
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id, but we want update IDs
+ // to be strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ }
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
+ log_bytes!(self.context.channel_id));
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
+ }
+
+ let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
+ // we'll send one right away when we get the revoke_and_ack when we
+ // free_holding_cell_htlcs().
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id, but we want update IDs
+ // to be strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ true
+ } else { false };
+
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
+ log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+ self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
+ }
+
+ /// Public version of the below, checking relevant preconditions first.
+ /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
+ /// returns `(None, Vec::new())`.
+ pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
+ (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+ self.free_holding_cell_htlcs(logger)
+ } else { (None, Vec::new()) }
+ }
+
+ /// Frees any pending commitment updates in the holding cell, generating the relevant messages
+ /// for our counterparty.
+ fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+ if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
+ log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
+ if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
+
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+ updates: Vec::new(),
+ };
+
+ let mut htlc_updates = Vec::new();
+ mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
+ let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut htlcs_to_fail = Vec::new();
+ for htlc_update in htlc_updates.drain(..) {
+ // Note that this *can* fail, though it should be due to rather-rare conditions such as
+ // fee races with adding too many outputs, which push our total payments just over
+ // the limit. In case it's less rare than I anticipate, we may want to revisit
+ // handling this case better, and maybe fulfill some of the HTLCs while attempting
+ // to rebalance channels.
+ match &htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
+ match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
+ Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
+ Err(e) => {
+ match e {
+ ChannelError::Ignore(ref msg) => {
+ log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
+ log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
+ // If we fail to send here, then this HTLC should
+ // be failed backwards. Failing to send here
+ // indicates that this HTLC may keep being put back
+ // into the holding cell without ever being
+ // successfully forwarded/failed/fulfilled, causing
+ // our counterparty to eventually close on us.
+ htlcs_to_fail.push((source.clone(), *payment_hash));
+ },
+ _ => {
+ panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
+ },
+ }
+ }
+ }
+ },
+ &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
+ // If an HTLC claim was previously added to the holding cell (via
+ // `get_update_fulfill_htlc`), then generating the claim message itself must
+ // not fail - any in-between attempts to claim the HTLC will have resulted
+ // in it hitting the holding cell again, and we cannot change the state of a
+ // holding cell HTLC from fulfill to anything else.
+ let (update_fulfill_msg_option, mut additional_monitor_update) =
+ if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+ (msg, monitor_update)
+ } else { unreachable!() };
+ update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+ monitor_update.updates.append(&mut additional_monitor_update.updates);
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
+ match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
+ Ok(update_fail_msg_option) => {
+ // If an HTLC failure was previously added to the holding cell (via
+ // `queue_fail_htlc`) then generating the fail message itself must
+ // not fail - we should never end up in a state where we double-fail
+ // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+ // for a full revocation before failing.
+ update_fail_htlcs.push(update_fail_msg_option.unwrap())
+ },
+ Err(e) => {
+ if let ChannelError::Ignore(_) = e {}
+ else {
+ panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+ }
+ }
+ }
+ },
+ }
+ }
+ if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
+ return (None, htlcs_to_fail);
+ }
+ let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
+ self.send_update_fee(feerate, false, logger)
+ } else {
+ None
+ };
+
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id,
+ // but we want update IDs to be strictly increasing by one, so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
+ log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
+
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
+ } else {
+ (None, Vec::new())
+ }
+ }
+
+ /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
+ /// commitment_signed message here in case we had pending outbound HTLCs to add which were
+ /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
+ /// generating an appropriate error *after* the channel state has been updated based on the
+ /// revoke_and_ack message.
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
+ where L::Target: Logger,
+ {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
+ }
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+ }
+
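+ // Check that the revealed per-commitment secret is well-formed and actually corresponds to
+ // the counterparty's previous commitment point before we mutate any state.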
+ let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
+
+ if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
+ if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
+ return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));