+ *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ }
+
+ self.context.holder_signer.validate_counterparty_revocation(
+ self.context.cur_counterparty_commitment_transaction_number + 1,
+ &secret
+ ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+
+ self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
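+ // Queue a monitor update providing the newly-revealed per-commitment secret to the
+ // ChannelMonitor, which uses it to watch for broadcasts of the now-revoked counterparty
+ // commitment transaction.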
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
+ idx: self.context.cur_counterparty_commitment_transaction_number + 1,
+ secret: msg.per_commitment_secret,
+ }],
+ };
+
+ // Update state now that we've passed all the can-fail calls...
+ // (Note that we may still fail to generate the new commitment_signed message, but that's
+ // OK: we step the channel here and *then*, if the new generation fails, we can fail the
+ // channel based on that. Stepping the state here should be safe either way.)
+ self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+ self.context.sent_message_awaiting_response = None;
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+ if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ }
+
+ log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
+ let mut to_forward_infos = Vec::new();
+ let mut revoked_htlcs = Vec::new();
+ let mut finalized_claimed_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
+ let mut require_commitment = false;
+ let mut value_to_self_msat_diff: i64 = 0;
+
+ {
+ // Take references explicitly so that we can hold multiple references to self.context.
+ let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
+ let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
+
+ // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
+ pending_inbound_htlcs.retain(|htlc| {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ value_to_self_msat_diff += htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
+ pending_outbound_htlcs.retain(|htlc| {
+ if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
+ log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
+ if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
+ revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
+ } else {
+ finalized_claimed_htlcs.push(htlc.source.clone());
+ // They fulfilled, so we sent them money
+ value_to_self_msat_diff -= htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
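+ // With the counterparty's previous commitment now revoked, advance the state of each
+ // remaining inbound HTLC by one step.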
+ for htlc in pending_inbound_htlcs.iter_mut() {
+ let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
+ true
+ } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
+ true
+ } else { false };
+ if swap {
+ let mut state = InboundHTLCState::Committed;
+ mem::swap(&mut state, &mut htlc.state);
+
+ if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+ require_commitment = true;
+ } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
+ match forward_info {
+ PendingHTLCStatus::Fail(fail_msg) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
+ require_commitment = true;
+ match fail_msg {
+ HTLCFailureMsg::Relay(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+ update_fail_htlcs.push(msg)
+ },
+ HTLCFailureMsg::Malformed(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+ update_fail_malformed_htlcs.push(msg)
+ },
+ }
+ },
+ PendingHTLCStatus::Forward(forward_info) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
+ to_forward_infos.push((forward_info, htlc.htlc_id));
+ htlc.state = InboundHTLCState::Committed;
+ }
+ }
+ }
+ }
+ }
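+ // Likewise advance outbound HTLCs: anything we previously announced is now committed, and
+ // removals awaiting this revocation move to AwaitingRemovedRemoteRevoke.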
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+ log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
+ htlc.state = OutboundHTLCState::Committed;
+ }
+ if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+ require_commitment = true;
+ }
+ }
+ }
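+ // Apply the net balance change from the HTLCs fulfilled above.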
+ self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
+
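+ // Fee updates follow the same commitment/revocation dance as HTLCs, so promote any
+ // pending update_fee that this revoke_and_ack unblocks.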
+ if let Some((feerate, update_state)) = self.context.pending_update_fee {
+ match update_state {
+ FeeUpdateState::Outbound => {
+ debug_assert!(self.context.is_outbound());
+ log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ },
+ FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
+ FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
+ debug_assert!(!self.context.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+ require_commitment = true;
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ },
+ }
+ }
+
+ if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
+ // We can't actually generate a new commitment transaction (including by freeing holding
+ // cells) while we can't update the monitor, so we just return what we have.
+ if require_commitment {
+ self.context.monitor_pending_commitment_signed = true;
+ // When the monitor updating is restored we'll call get_last_commitment_update(),
+ // which does not update state, but we're definitely now awaiting a remote revoke
+ // before we can step forward any more, so set it here.
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id, but we want update
+ // IDs to be strictly increasing by one, so reset it to this update's ID here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ }
+ self.context.monitor_pending_forwards.append(&mut to_forward_infos);
+ self.context.monitor_pending_failures.append(&mut revoked_htlcs);
+ self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
+ return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
+ }
+
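+ // Monitor updating isn't blocked, so attempt to free any updates waiting in the holding
+ // cell and fold the resulting monitor update into the one we're building here.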
+ match self.free_holding_cell_htlcs(fee_estimator, logger) {
+ (Some(mut additional_update), htlcs_to_fail) => {
+ // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times, but we want
+ // update IDs to be strictly increasing by one, so reset it to this update's ID here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+ },
+ (None, htlcs_to_fail) => {
+ if require_commitment {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+
+ // build_commitment_no_status_check may bump latest_monitor_update_id, but we want update
+ // IDs to be strictly increasing by one, so reset it to this update's ID here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
+ log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+ } else {
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
+ self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+ }
+ }
+ }
+ }
+
+ /// Queues up an outbound update fee by placing it in the holding cell. You should call
+ /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+ /// commitment update.
+ pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
+ assert!(msg_opt.is_none(), "We forced holding cell?");
+ }
+
+ /// Adds a pending fee update to this channel. See the doc for [`Self::send_htlc`] for
+ /// further details on when the return value may be `None`.
+ /// If our balance is too low to cover the cost of the next commitment transaction at the
+ /// new feerate, the update is cancelled.
+ ///
+ /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
+ /// [`Channel`] if `force_holding_cell` is false.
+ fn send_update_fee<F: Deref, L: Deref>(
+ &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Option<msgs::UpdateFee>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if !self.context.is_outbound() {
+ panic!("Cannot send fee from inbound channel");
+ }
+ if !self.context.is_usable() {
+ panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
+ }
+ if !self.context.is_live() {
+ panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
+ }
+
+ // Before proposing a feerate update, check that we can actually afford the new fee.
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
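+ // Budget for the worst case: every non-dust HTLC currently on our commitment transaction,
+ // anything in our holding cell, plus a buffer of HTLCs the counterparty may add concurrently.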
+ let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
+ let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
+ if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
+ //TODO: auto-close after a number of failures?
+ log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
+ return None;
+ }
+
+ // Note that we evaluate pending HTLCs against the "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
+ log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ return None;
+ }
+ if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
+ log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ return None;
+ }
+
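+ // If we're still awaiting the counterparty's revoke_and_ack or a monitor update, we can't
+ // send the update_fee immediately; park it in the holding cell instead.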
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ force_holding_cell = true;
+ }
+
+ if force_holding_cell {
+ self.context.holding_cell_update_fee = Some(feerate_per_kw);
+ return None;
+ }
+
+ debug_assert!(self.context.pending_update_fee.is_none());
+ self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
+
+ Some(msgs::UpdateFee {
+ channel_id: self.context.channel_id,
+ feerate_per_kw,
+ })
+ }
+
+ /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
+ /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
+ /// resent.
+ /// No further message handling calls may be made until a channel_reestablish dance has
+ /// completed.
+ pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ return;
+ }
+
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
+ // While the below code should be idempotent, it's simpler to just return early, as
+ // redundant disconnect events can fire, though they should be rare.
+ return;
+ }
+
+ if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
+ }
+
+ // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
+ // will be retransmitted.
+ self.context.last_sent_closing_fee = None;
+ self.context.pending_counterparty_closing_signed = None;
+ self.context.closing_fee_limits = None;
+
+ let mut inbound_drop_count = 0;
+ self.context.pending_inbound_htlcs.retain(|htlc| {
+ match htlc.state {
+ InboundHTLCState::RemoteAnnounced(_) => {
+ // They sent us an update_add_htlc but we never got the commitment_signed.
+ // We'll tell them what commitment_signed we're expecting next and they'll drop
+ // this HTLC accordingly
+ inbound_drop_count += 1;
+ false
+ },
+ InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
+ // We received a commitment_signed updating this HTLC and (at least hopefully)
+ // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
+ // in response to it yet, so don't touch it.
+ true