+ }
+ let mut claimed_htlcs = Vec::new();
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
+ &htlc.payment_hash, &self.context.channel_id);
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
+ // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
+ // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
+ // have a `Success(None)` reason. In this case we could forget some HTLC
+ // claims, but such an upgrade is unlikely and including claimed HTLCs here
+ // fixes a bug which the user was exposed to on 0.0.104 when they started the
+ // claim anyway.
+ claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+ }
+ htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
+ need_commitment = true;
+ }
+ }
+
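+ // Build a monitor update persisting the newly-signed holder commitment transaction
+ // (along with the HTLC signatures, claimed HTLCs, and non-dust HTLC sources that go
+ // with it); we push it to the monitor below.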
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+ commitment_tx: holder_commitment_tx,
+ htlc_outputs: htlcs_and_sigs,
+ claimed_htlcs,
+ nondust_htlc_sources,
+ }]
+ };
+
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
+ // build_commitment_no_status_check() next which will reset this to RAAFirst.
+ self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
+
+ if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ // In case we initially failed monitor updating without requiring a response, we need
+ // to make sure the RAA gets sent first.
+ self.context.monitor_pending_revoke_and_ack = true;
+ if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we were going to send a commitment_signed after the RAA, go ahead and do all
+ // the corresponding HTLC status updates so that get_last_commitment_update
+ // includes the right HTLCs.
+ self.context.monitor_pending_commitment_signed = true;
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want
+ // update_ids to be strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ }
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
+ &self.context.channel_id);
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
+ }
+
+ let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
+ // we'll send one right away when we get the revoke_and_ack when we
+ // free_holding_cell_htlcs().
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want
+ // update_ids to be strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ true
+ } else { false };
+
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
+ &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
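+ // Flag the monitor update as in-progress and record that, once it completes, we owe
+ // our peer a revoke_and_ack (and, if need_commitment_signed, our own commitment_signed).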
+ self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
+ }
+
+ /// Public version of the below, checking relevant preconditions first.
+ /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
+ /// returns `(None, Vec::new())`.
+ pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
+ (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+ self.free_holding_cell_htlcs(fee_estimator, logger)
+ } else { (None, Vec::new()) }
+ }
+
+ /// Frees any pending commitment updates in the holding cell, generating the relevant messages
+ /// for our counterparty.
+ fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
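+ // Our caller (maybe_free_holding_cell_htlcs) has already checked that no monitor
+ // update is in flight, which the assertion below enforces.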
+ assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+ if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
+ log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
+ if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
+
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+ updates: Vec::new(),
+ };
+
+ let mut htlc_updates = Vec::new();
+ mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
+ let mut update_add_count = 0;
+ let mut update_fulfill_count = 0;
+ let mut update_fail_count = 0;
+ let mut htlcs_to_fail = Vec::new();
+ for htlc_update in htlc_updates.drain(..) {
+ // Note that this *can* fail, though it should be due to rather-rare conditions on
+ // fee races with adding too many outputs which push our total payments just over
+ // the limit. In case it's less rare than I anticipate, we may want to revisit
+ // handling this case better and maybe fulfilling some of the HTLCs while attempting
+ // to rebalance channels.
+ match &htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+ skimmed_fee_msat, ..
+ } => {
+ match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
+ onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
+ {
+ Ok(_) => update_add_count += 1,
+ Err(e) => {
+ match e {
+ ChannelError::Ignore(ref msg) => {
+ log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
+ // If we fail to send here, then this HTLC should
+ // be failed backwards. Failing to send here
+ // indicates that this HTLC may keep being put back
+ // into the holding cell without ever being
+ // successfully forwarded/failed/fulfilled, causing
+ // our counterparty to eventually close on us.
+ htlcs_to_fail.push((source.clone(), *payment_hash));
+ },
+ _ => {
+ panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
+ },
+ }
+ }
+ }
+ },
+ &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
+ // If an HTLC claim was previously added to the holding cell (via
+ // `get_update_fulfill_htlc`), then generating the claim message itself must
+ // not fail - any in between attempts to claim the HTLC will have resulted
+ // in it hitting the holding cell again and we cannot change the state of a
+ // holding cell HTLC from fulfill to anything else.
+ let mut additional_monitor_update =
+ if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
+ self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
+ { monitor_update } else { unreachable!() };
+ update_fulfill_count += 1;
+ monitor_update.updates.append(&mut additional_monitor_update.updates);
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
+ match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
+ Ok(update_fail_msg_option) => {
+ // If an HTLC failure was previously added to the holding cell (via
+ // `queue_fail_htlc`) then generating the fail message itself must
+ // not fail - we should never end up in a state where we double-fail
+ // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+ // for a full revocation before failing.
+ debug_assert!(update_fail_msg_option.is_some());
+ update_fail_count += 1;
+ },
+ Err(e) => {
+ if let ChannelError::Ignore(_) = e {}
+ else {
+ panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+ }
+ }
+ }
+ },
+ }
+ }
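+ // If none of the holding cell updates produced a message (e.g. every add failed to
+ // send) and there's no fee update, there is nothing to commit to. Note that
+ // latest_monitor_update_id was never actually incremented above, so the speculative
+ // monitor update is simply dropped.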
+ if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
+ return (None, htlcs_to_fail);
+ }
+ let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
+ self.send_update_fee(feerate, false, fee_estimator, logger)
+ } else {
+ None
+ };
+
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check and get_update_fulfill_htlc may bump
+ // latest_monitor_update_id but we want update_ids to be strictly increasing by one,
+ // so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
+ &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
+ update_add_count, update_fulfill_count, update_fail_count);
+
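+ // We owe our peer a commitment_signed (but no revoke_and_ack) once the monitor
+ // update completes.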
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
+ } else {
+ (None, Vec::new())
+ }
+ }
+
+ /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
+ /// commitment_signed message here in case we had pending outbound HTLCs to add which were
+ /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
+ /// generating an appropriate error *after* the channel state has been updated based on the
+ /// revoke_and_ack message.
+ pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
+ ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger,
+ {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
+ }
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+ }
+
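+ // Parse the revocation secret and, if we know the peer's previous commitment point,
+ // verify that the secret actually derives it.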
+ let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
+
+ if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
+ if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
+ return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
+ }
+ }
+
+ if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+ // Our counterparty seems to have burned their coins to us (by revoking a state when we
+ // haven't given them a new commitment transaction to broadcast). We should probably
+ // take advantage of this by updating our channel monitor, sending them an error, and
+ // waiting for them to broadcast their latest (now-revoked) claim. But that would be a
+ // lot of work, and there's some chance this is all a misunderstanding anyway.
+ // We have to do *something*, though, since our signer may get mad at us for otherwise
+ // jumping a remote commitment number, so best to just force-close and move on.
+ return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
+ }
+
+ #[cfg(any(test, fuzzing))]
+ {
+ *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ }
+
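+ // Note the + 1 below: commitment numbers count down, and we haven't yet decremented
+ // cur_counterparty_commitment_transaction_number for this revocation (that happens
+ // below), so the state being revoked is one commitment number higher.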
+ match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ ecdsa.validate_counterparty_revocation(
+ self.context.cur_counterparty_commitment_transaction_number + 1,
+ &secret
+ ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+ }
+ };
+
+ self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
+ idx: self.context.cur_counterparty_commitment_transaction_number + 1,
+ secret: msg.per_commitment_secret,
+ }],
+ };
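+ // Persisting the revocation secret in the ChannelMonitor lets it punish the
+ // counterparty if they ever broadcast the now-revoked commitment transaction.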
+
+ // Update state now that we've passed all the can-fail calls...
+ // (note that we may still fail to generate the new commitment_signed message, but that's
+ // OK, we step the channel here and *then* if the new generation fails we can fail the
+ // channel based on that, but stepping stuff here should be safe either way.)
+ self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+ self.context.sent_message_awaiting_response = None;
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+ if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ }
+
+ log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
+ let mut to_forward_infos = Vec::new();
+ let mut revoked_htlcs = Vec::new();
+ let mut finalized_claimed_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
+ let mut require_commitment = false;
+ let mut value_to_self_msat_diff: i64 = 0;
+
+ {
+ // Take references explicitly so that we can hold multiple references to self.context.
+ let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
+ let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
+
+ // We really shouldn't have two passes here, but retain only gives us a shared ref
+ pending_inbound_htlcs.retain(|htlc| {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ value_to_self_msat_diff += htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
+ pending_outbound_htlcs.retain(|htlc| {
+ if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
+ log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
+ if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
+ revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
+ } else {
+ finalized_claimed_htlcs.push(htlc.source.clone());
+ // They fulfilled, so we sent them money
+ value_to_self_msat_diff -= htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
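+ // With the fully-resolved HTLCs removed, step each remaining HTLC's state machine
+ // forward one stage now that the remote has revoked the previous commitment.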
+ for htlc in pending_inbound_htlcs.iter_mut() {
+ let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
+ true
+ } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
+ true
+ } else { false };
+ if swap {
+ let mut state = InboundHTLCState::Committed;
+ mem::swap(&mut state, &mut htlc.state);
+
+ if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
+ htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+ require_commitment = true;
+ } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
+ match forward_info {
+ PendingHTLCStatus::Fail(fail_msg) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
+ require_commitment = true;
+ match fail_msg {
+ HTLCFailureMsg::Relay(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+ update_fail_htlcs.push(msg)
+ },
+ HTLCFailureMsg::Malformed(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+ update_fail_malformed_htlcs.push(msg)
+ },
+ }
+ },
+ PendingHTLCStatus::Forward(forward_info) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
+ to_forward_infos.push((forward_info, htlc.htlc_id));
+ htlc.state = InboundHTLCState::Committed;
+ }
+ }
+ }
+ }
+ }
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+ log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
+ htlc.state = OutboundHTLCState::Committed;
+ }
+ if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+ require_commitment = true;
+ }
+ }
+ }
+ self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
+
+ if let Some((feerate, update_state)) = self.context.pending_update_fee {
+ match update_state {
+ FeeUpdateState::Outbound => {
+ debug_assert!(self.context.is_outbound());
+ log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;