+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+ updates: Vec::new(),
+ };
+
+ let mut htlc_updates = Vec::new();
+ mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
+ let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut htlcs_to_fail = Vec::new();
+ for htlc_update in htlc_updates.drain(..) {
+ // Note that this *can* fail, though it should only do so due to rather-rare conditions
+ // such as fee races with adding too many outputs, which push our total payments just over
+ // the limit. If this turns out to be less rare than anticipated, we may want to revisit
+ // handling this case better, perhaps fulfilling some of the HTLCs while attempting to
+ // rebalance channels.
+ match &htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
+ match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
+ Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
+ Err(e) => {
+ match e {
+ ChannelError::Ignore(ref msg) => {
+ log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
+ log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
+ // If we fail to send here, then this HTLC should
+ // be failed backwards. Failing to send here
+ // indicates that this HTLC may keep being put back
+ // into the holding cell without ever being
+ // successfully forwarded/failed/fulfilled, causing
+ // our counterparty to eventually close on us.
+ htlcs_to_fail.push((source.clone(), *payment_hash));
+ },
+ _ => {
+ panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
+ },
+ }
+ }
+ }
+ },
+ &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
+ // If an HTLC claim was previously added to the holding cell (via
+ // `get_update_fulfill_htlc`), then generating the claim message itself must
+ // not fail - any in-between attempts to claim the HTLC will have resulted
+ // in it hitting the holding cell again, and we cannot change the state of a
+ // holding cell HTLC from fulfill to anything else.
+ let (update_fulfill_msg_option, mut additional_monitor_update) =
+ if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+ (msg, monitor_update)
+ } else { unreachable!() };
+ update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+ monitor_update.updates.append(&mut additional_monitor_update.updates);
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
+ match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
+ Ok(update_fail_msg_option) => {
+ // If an HTLC failure was previously added to the holding cell (via
+ // `queue_fail_htlc`) then generating the fail message itself must
+ // not fail - we should never end up in a state where we double-fail
+ // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+ // for a full revocation before failing.
+ update_fail_htlcs.push(update_fail_msg_option.unwrap())
+ },
+ Err(e) => {
+ if let ChannelError::Ignore(_) = e {}
+ else {
+ panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+ }
+ }
+ }
+ },
+ }
+ }
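+ // If freeing the holding cell produced no update messages and there is no pending fee
+ // update, there is nothing new to commit to, so skip building a commitment.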
+ if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
+ return (None, htlcs_to_fail);
+ }
+ let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
+ self.send_update_fee(feerate, false, logger)
+ } else {
+ None
+ };
+
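+ // Build a new commitment covering the updates we just freed from the holding cell (and
+ // the fee update, if any), folding its monitor update into ours.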
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check and get_update_fulfill_htlc may bump
+ // latest_monitor_update_id, but we want the update_ids to be strictly increasing by one,
+ // so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
+ log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
+
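+ // Note that the update messages built above are not sent here; the full commitment
+ // update is regenerated via get_last_commitment_update once the monitor update completes.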
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
+ } else {
+ (None, Vec::new())
+ }
+ }
+
+ /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
+ /// commitment_signed message here in case we had pending outbound HTLCs to add which were
+ /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
+ /// generating an appropriate error *after* the channel state has been updated based on the
+ /// revoke_and_ack message.
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
+ where L::Target: Logger,
+ {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
+ }
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+ }
+
+ let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
+
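+ // If we still have their previous per-commitment point, check that the revealed secret
+ // actually corresponds to it.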
+ if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
+ if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
+ return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
+ }
+ }
+
+ if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+ // Our counterparty seems to have burned their coins to us (by revoking a state when we
+ // haven't given them a new commitment transaction to broadcast). We should probably
+ // take advantage of this by updating our channel monitor, sending them an error, and
+ // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
+ // lot of work, and there's some chance this is all a misunderstanding anyway.
+ // We have to do *something*, though, since our signer may get mad at us for otherwise
+ // jumping a remote commitment number, so best to just force-close and move on.
+ return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
+ }
+
+ #[cfg(any(test, fuzzing))]
+ {
+ *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ }
+
+ self.context.holder_signer.validate_counterparty_revocation(
+ self.context.cur_counterparty_commitment_transaction_number + 1,
+ &secret
+ ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+
+ self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
+ idx: self.context.cur_counterparty_commitment_transaction_number + 1,
+ secret: msg.per_commitment_secret,
+ }],
+ };
+
+ // Update state now that we've passed all the can-fail calls...
+ // (Note that we may still fail to generate the new commitment_signed message, but that's
+ // OK: we step the channel here and *then*, if the new generation fails, we can fail the
+ // channel based on that, but stepping the state here should be safe either way.)
+ self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+ self.context.sent_message_awaiting_response = None;
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+ if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ }
+
+ log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
+ let mut to_forward_infos = Vec::new();
+ let mut revoked_htlcs = Vec::new();
+ let mut finalized_claimed_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
+ let mut require_commitment = false;
+ let mut value_to_self_msat_diff: i64 = 0;
+
+ {
+ // Take references explicitly so that we can hold multiple references to self.context.
+ let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
+ let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
+
+ // We really shouldn't need two passes here, but retain gives a non-mutable ref (Rust bug)
+ pending_inbound_htlcs.retain(|htlc| {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ value_to_self_msat_diff += htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
+ pending_outbound_htlcs.retain(|htlc| {
+ if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
+ log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
+ if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
+ revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
+ } else {
+ finalized_claimed_htlcs.push(htlc.source.clone());
+ // They fulfilled, so we sent them money
+ value_to_self_msat_diff -= htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
+ for htlc in pending_inbound_htlcs.iter_mut() {
+ let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
+ true
+ } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
+ true
+ } else { false };
+ if swap {
+ let mut state = InboundHTLCState::Committed;
+ mem::swap(&mut state, &mut htlc.state);
+
+ if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+ require_commitment = true;
+ } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
+ match forward_info {
+ PendingHTLCStatus::Fail(fail_msg) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
+ require_commitment = true;
+ match fail_msg {
+ HTLCFailureMsg::Relay(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+ update_fail_htlcs.push(msg)
+ },
+ HTLCFailureMsg::Malformed(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+ update_fail_malformed_htlcs.push(msg)
+ },
+ }
+ },
+ PendingHTLCStatus::Forward(forward_info) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
+ to_forward_infos.push((forward_info, htlc.htlc_id));
+ htlc.state = InboundHTLCState::Committed;
+ }
+ }
+ }
+ }
+ }
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+ log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
+ htlc.state = OutboundHTLCState::Committed;
+ }
+ if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+ require_commitment = true;
+ }
+ }
+ }
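+ // Apply the net balance change from the fulfilled/failed HTLCs removed above.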
+ self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
+
+ if let Some((feerate, update_state)) = self.context.pending_update_fee {
+ match update_state {
+ FeeUpdateState::Outbound => {
+ debug_assert!(self.context.is_outbound());
+ log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ },
+ FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
+ FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
+ debug_assert!(!self.context.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+ require_commitment = true;
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ },
+ }
+ }
+
+ if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
+ // We can't actually generate a new commitment transaction (including by freeing holding
+ // cells) while we can't update the monitor, so we just return what we have.
+ if require_commitment {
+ self.context.monitor_pending_commitment_signed = true;
+ // When the monitor updating is restored we'll call get_last_commitment_update(),
+ // which does not update state, but we're definitely now awaiting a remote revoke
+ // before we can step forward any more, so set it here.
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id, but we want the
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ }
+ self.context.monitor_pending_forwards.append(&mut to_forward_infos);
+ self.context.monitor_pending_failures.append(&mut revoked_htlcs);
+ self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
+ return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
+ }
+
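+ // The monitor is free to be updated again, so try to free any holding-cell HTLCs now,
+ // folding their monitor update into the one we return.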
+ match self.free_holding_cell_htlcs(logger) {
+ (Some(_), htlcs_to_fail) => {
+ let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
+ // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times, but we want
+ // the update_ids to be strictly increasing by one, so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+ },
+ (None, htlcs_to_fail) => {
+ if require_commitment {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+
+ // build_commitment_no_status_check may bump latest_monitor_update_id, but we want the
+ // update_ids to be strictly increasing by one, so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
+ log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+ } else {
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
+ self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+ }
+ }
+ }
+ }
+
+ /// Queues up an outbound update fee by placing it in the holding cell. You should call
+ /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+ /// commitment update.
+ pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
+ let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
+ assert!(msg_opt.is_none(), "We forced holding cell?");
+ }
+
+ /// Adds a pending fee update to this channel. See the doc for send_htlc for further
+ /// details on why the return value is an Option.
+ /// If our balance is too low to cover the cost of the next commitment transaction at the
+ /// new feerate, the update is cancelled.
+ ///
+ /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
+ /// [`Channel`] if `force_holding_cell` is false.
+ fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
+ if !self.context.is_outbound() {
+ panic!("Cannot send fee from inbound channel");
+ }
+ if !self.context.is_usable() {
+ panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
+ }
+ if !self.context.is_live() {
+ panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
+ }
+
+ // Before proposing a feerate update, check that we can actually afford the new fee.
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
+ let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000;
+ let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
+ if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
+ //TODO: auto-close after a number of failures?
+ log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
+ return None;
+ }
+
+ // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust thresholds at the proposed `feerate_per_kw`.
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ return None;
+ }
+ if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ return None;
+ }
+
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ force_holding_cell = true;
+ }
+
+ if force_holding_cell {
+ self.context.holding_cell_update_fee = Some(feerate_per_kw);
+ return None;
+ }
+
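+ // Record the fee update as an outbound pending update until it makes it into a
+ // commitment.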
+ debug_assert!(self.context.pending_update_fee.is_none());
+ self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
+
+ Some(msgs::UpdateFee {
+ channel_id: self.context.channel_id,
+ feerate_per_kw,
+ })
+ }
+
+ /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
+ /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
+ /// resent.
+ /// No further message handling calls may be made until a channel_reestablish dance has
+ /// completed.
+ pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ return;
+ }
+
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
+ // While the below code should be idempotent, it's simpler to just return early, as
+ // redundant disconnect events can fire, though they should be rare.
+ return;
+ }
+
+ if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
+ }
+
+ // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
+ // will be retransmitted.
+ self.context.last_sent_closing_fee = None;
+ self.context.pending_counterparty_closing_signed = None;
+ self.context.closing_fee_limits = None;
+
+ let mut inbound_drop_count = 0;
+ self.context.pending_inbound_htlcs.retain(|htlc| {
+ match htlc.state {
+ InboundHTLCState::RemoteAnnounced(_) => {
+ // They sent us an update_add_htlc but we never got the commitment_signed.
+ // We'll tell them what commitment_signed we're expecting next and they'll drop
+ // this HTLC accordingly.
+ inbound_drop_count += 1;
+ false
+ },
+ InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
+ // We received a commitment_signed updating this HTLC and (at least hopefully)
+ // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
+ // in response to it yet, so don't touch it.
+ true
+ },
+ InboundHTLCState::Committed => true,
+ InboundHTLCState::LocalRemoved(_) => {
+ // We (hopefully) sent a commitment_signed updating this HTLC (which we can
+ // re-transmit if needed) and they may have even sent a revoke_and_ack back
+ // (that we missed). Keep this around for now and if they tell us they missed
+ // the commitment_signed we can re-transmit the update then.
+ true
+ },
+ }
+ });
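+ // Roll back the counterparty's next-HTLC-id counter to account for the uncommitted
+ // inbound HTLCs dropped above.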
+ self.context.next_counterparty_htlc_id -= inbound_drop_count;
+
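+ // Similarly, a fee update the peer announced but never committed is dropped; they will
+ // re-send it after reconnecting.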
+ if let Some((_, update_state)) = self.context.pending_update_fee {
+ if update_state == FeeUpdateState::RemoteAnnounced {
+ debug_assert!(!self.context.is_outbound());
+ self.context.pending_update_fee = None;
+ }
+ }
+
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
+ // They sent us an update to remove this but haven't yet sent the corresponding
+ // commitment_signed, we need to move it back to Committed and they can re-send
+ // the update upon reconnection.
+ htlc.state = OutboundHTLCState::Committed;
+ }
+ }
+
+ self.context.sent_message_awaiting_response = None;
+
+ self.context.channel_state |= ChannelState::PeerDisconnected as u32;
+ log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
+ }
+
+ /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
+ /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+ /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+ /// update completes (potentially immediately).
+ /// The messages which were generated with the monitor update must *not* have been sent to the
+ /// remote end, and must instead have been dropped. They will be regenerated when
+ /// [`Self::monitor_updating_restored`] is called.
+ ///
+ /// [`ChannelManager`]: super::channelmanager::ChannelManager
+ /// [`chain::Watch`]: crate::chain::Watch
+ /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
+ fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+ resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
+ mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
+ mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
+ ) {
+ self.context.monitor_pending_revoke_and_ack |= resend_raa;
+ self.context.monitor_pending_commitment_signed |= resend_commitment;
+ self.context.monitor_pending_channel_ready |= resend_channel_ready;
+ self.context.monitor_pending_forwards.append(&mut pending_forwards);
+ self.context.monitor_pending_failures.append(&mut pending_fails);
+ self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
+ self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
+ }
+
+ /// Indicates that the latest ChannelMonitor update has been committed by the client
+ /// successfully and we should restore normal operation. Returns messages which should be sent
+ /// to the remote side.
+ pub fn monitor_updating_restored<L: Deref, NS: Deref>(
+ &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
+ user_config: &UserConfig, best_block_height: u32
+ ) -> MonitorRestoreUpdates
+ where
+ L::Target: Logger,
+ NS::Target: NodeSigner
+ {
+ assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
+ self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
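+ // Monitor updates which were in flight (i.e. not blocked) have now been persisted, so
+ // drop them; any still-blocked updates (which must all follow the unblocked ones) are
+ // kept for later.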
+ let mut found_blocked = false;
+ self.context.pending_monitor_updates.retain(|upd| {
+ if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
+ if upd.blocked { found_blocked = true; }
+ upd.blocked
+ });
+
+ // If we're past (or at) the FundingSent stage on an outbound channel, try to
+ // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
+ // first received the funding_signed.
+ let mut funding_broadcastable =
+ if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+ self.context.funding_transaction.take()
+ } else { None };
+ // That said, if the funding transaction is already confirmed (i.e., we're active with a
+ // minimum_depth over 0), don't bother re-broadcasting the confirmed funding tx.
+ if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+ funding_broadcastable = None;
+ }
+
+ // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
+ // (and we assume the user never directly broadcasts the funding transaction and waits for
+ // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
+ // * an inbound channel that failed to persist the monitor on funding_created and we got
+ // the funding transaction confirmed before the monitor was persisted, or
+ // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
+ let channel_ready = if self.context.monitor_pending_channel_ready {
+ assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
+ "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
+ self.context.monitor_pending_channel_ready = false;
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ })
+ } else { None };
+
+ let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
+
+ let mut accepted_htlcs = Vec::new();
+ mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
+ let mut failed_htlcs = Vec::new();
+ mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
+ let mut finalized_claimed_htlcs = Vec::new();
+ mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
+
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ return MonitorRestoreUpdates {
+ raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
+ accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ };
+ }
+
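+ // Re-generate whichever of the RAA and commitment update the pending flags indicate we
+ // still owe the peer.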
+ let raa = if self.context.monitor_pending_revoke_and_ack {
+ Some(self.get_last_revoke_and_ack())
+ } else { None };
+ let commitment_update = if self.context.monitor_pending_commitment_signed {
+ self.mark_awaiting_response();
+ Some(self.get_last_commitment_update(logger))
+ } else { None };
+
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ let order = self.context.resend_order.clone();
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+ if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
+ match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+ MonitorRestoreUpdates {
+ raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ }
+ }
+
+ pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.context.is_outbound() {
+ return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
+ }
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
+ let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
+
+ self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
+ self.context.update_time_counter += 1;
+ // If the feerate has increased over the previous dust buffer (note that
+ // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
+ // won't be pushed over our dust exposure limit by the feerate increase.
+ if feerate_over_dust_buffer {
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+ msg.feerate_per_kw, holder_tx_dust_exposure)));
+ }
+ if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+ msg.feerate_per_kw, counterparty_tx_dust_exposure)));
+ }
+ }
+ Ok(())
+ }
+
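+ /// Re-builds the last `revoke_and_ack` we owe the peer, for retransmission (e.g. once a
+ /// monitor update completes).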
+ fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
+ msgs::RevokeAndACK {
+ channel_id: self.context.channel_id,
+ per_commitment_secret,
+ next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ }
+ }
+
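+ /// Re-builds the latest commitment update we sent (the update_add/fulfill/fail messages,
+ /// any pending fee update, and the commitment_signed) for retransmission.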
+ fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
+ let mut update_add_htlcs = Vec::new();
+ let mut update_fulfill_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
+
+ for htlc in self.context.pending_outbound_htlcs.iter() {
+ if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
+ update_add_htlcs.push(msgs::UpdateAddHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ amount_msat: htlc.amount_msat,
+ payment_hash: htlc.payment_hash,
+ cltv_expiry: htlc.cltv_expiry,
+ onion_routing_packet: (**onion_packet).clone(),
+ });
+ }
+ }
+
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ match reason {
+ &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
+ update_fail_htlcs.push(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ reason: err_packet.clone()
+ });
+ },
+ &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
+ update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ sha256_of_onion: sha256_of_onion.clone(),
+ failure_code: failure_code.clone(),
+ });
+ },
+ &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
+ update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ payment_preimage: payment_preimage.clone(),
+ });
+ },
+ }
+ }
+ }
+
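+ // Only the funder sends update_fee, so only re-send a pending fee update if we're
+ // outbound.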
+ let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
+ Some(msgs::UpdateFee {
+ channel_id: self.context.channel_id(),
+ feerate_per_kw: self.context.pending_update_fee.unwrap().0,
+ })
+ } else { None };
+
+ log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+ log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
+ msgs::CommitmentUpdate {
+ update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
+ commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
+ }
+ }
+
+ /// May panic if some calls other than message-handling calls (which will all Err immediately)
+ /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
+ ///
+ /// Some links printed in log lines are included here to check them during build (when run with
+ /// `cargo doc --document-private-items`):
+ /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
+ /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
+ pub fn channel_reestablish<L: Deref, NS: Deref>(
+ &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
+ genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
+ ) -> Result<ReestablishResponses, ChannelError>
+ where
+ L::Target: Logger,
+ NS::Target: NodeSigner
+ {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ // While BOLT 2 doesn't explicitly say we should error the channel here, a loose
+ // channel_reestablish almost certainly indicates we are going to end up out-of-sync in
+ // some way, so we just close here instead of trying to recover.
+ return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
+ }
+
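+ // Sanity-check the commitment numbers the peer sent: neither may be at or above the
+ // initial commitment number, and their next local commitment number must be non-zero.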
+ if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
+ msg.next_local_commitment_number == 0 {
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
+ }