+ self.context.cur_holder_commitment_transaction_number -= 1;
+ // Note that if we need_commitment && !AwaitingRemoteRevoke we'll call
+ // build_commitment_no_status_check() next, which will reset this to RAAFirst.
+ self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
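+ // (resend_order determines which of our revoke_and_ack/commitment_signed we retransmit
+ // first if these messages later need to be replayed, e.g. after a reconnection.)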
+
+ if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ // In case we initially failed monitor updating without requiring a response, we need
+ // to make sure the RAA gets sent first.
+ self.context.monitor_pending_revoke_and_ack = true;
+ if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we were going to send a commitment_signed after the RAA, go ahead and do all
+ // the corresponding HTLC status updates so that get_last_commitment_update
+ // includes the right HTLCs.
+ self.context.monitor_pending_commitment_signed = true;
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id, but we want
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ }
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
+ &self.context.channel_id());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
+ }
+
+ let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
+ // we'll send one right away when we get the revoke_and_ack when we
+ // free_holding_cell_htlcs().
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id, but we want
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ true
+ } else { false };
+
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
+ &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
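+ // Record that a monitor update is now pending; once it completes we'll be handed back the
+ // revoke_and_ack (and the commitment_signed we built above, if any) to send to our peer.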
+ self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
+ }
+
+ /// Public version of the below, checking relevant preconditions first.
+ /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
+ /// returns `(None, Vec::new())`.
+ pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
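+ // Only free the holding cell once the channel is fully open (ignoring any flag bits) and
+ // quiet: not waiting on our peer's revoke_and_ack, not disconnected, and with no monitor
+ // update still in flight.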
+ if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
+ (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+ self.free_holding_cell_htlcs(fee_estimator, logger)
+ } else { (None, Vec::new()) }
+ }
+
+ /// Frees any pending commitment updates in the holding cell, generating the relevant messages
+ /// for our counterparty.
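+ /// Returns any `ChannelMonitorUpdate` generated, along with the HTLCs which could not be
+ /// freed and should instead be failed backwards.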
+ fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
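+ // We can only free the holding cell while no monitor update is in flight, as we're about
+ // to build a new, strictly-ordered update below.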
+ assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+ if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
+ log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
+ if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
+
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+ updates: Vec::new(),
+ };
+
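+ // Pull everything out of the holding cell and replay each update through the normal send
+ // path, counting each kind of update we successfully regenerate. HTLCs which can no longer
+ // be sent are collected in htlcs_to_fail so the caller can fail them backwards.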
+ let mut htlc_updates = Vec::new();
+ mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
+ let mut update_add_count = 0;
+ let mut update_fulfill_count = 0;
+ let mut update_fail_count = 0;
+ let mut htlcs_to_fail = Vec::new();
+ for htlc_update in htlc_updates.drain(..) {
+ // Note that this *can* fail, though it should be due to rather-rare conditions on
+ // fee races with adding too many outputs which push our total payments just over
+ // the limit. In case it's less rare than I anticipate, we may want to revisit
+ // handling this case better and maybe fulfilling some of the HTLCs while attempting
+ // to rebalance channels.
+ match &htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+ skimmed_fee_msat, ..
+ } => {
+ match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
+ onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
+ {
+ Ok(_) => update_add_count += 1,
+ Err(e) => {
+ match e {
+ ChannelError::Ignore(ref msg) => {
+ log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
+ // If we fail to send here, then this HTLC should
+ // be failed backwards. Failing to send here
+ // indicates that this HTLC may keep being put back
+ // into the holding cell without ever being
+ // successfully forwarded/failed/fulfilled, causing
+ // our counterparty to eventually close on us.
+ htlcs_to_fail.push((source.clone(), *payment_hash));
+ },
+ _ => {
+ panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
+ },
+ }
+ }