- let mut htlc_updates = Vec::new();
- mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
- let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut htlcs_to_fail = Vec::new();
- for htlc_update in htlc_updates.drain(..) {
- // Note that this *can* fail, though it should be due to rather-rare conditions on
- // fee races with adding too many outputs which push our total payments just over
- // the limit. In case it's less rare than I anticipate, we may want to revisit
- // handling this case better and maybe fulfilling some of the HTLCs while attempting
- // to rebalance channels.
- match &htlc_update {
- &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
- match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
- Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
- Err(e) => {
- match e {
- ChannelError::Ignore(ref msg) => {
- log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
- log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id()));
- // If we fail to send here, then this HTLC should
- // be failed backwards. Failing to send here
- // indicates that this HTLC may keep being put back
- // into the holding cell without ever being
- // successfully forwarded/failed/fulfilled, causing
- // our counterparty to eventually close on us.
- htlcs_to_fail.push((source.clone(), *payment_hash));
- },
- _ => {
- panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
- },
- }
- }
- }
- },
- &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
- // If an HTLC claim was previously added to the holding cell (via
- // `get_update_fulfill_htlc`), then generating the claim message itself must
- // not fail - any in between attempts to claim the HTLC will have resulted
- // in it hitting the holding cell again and we cannot change the state of a
- // holding cell HTLC from fulfill to anything else.
- let (update_fulfill_msg_option, mut additional_monitor_update) =
- if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
- (msg, monitor_update)
- } else { unreachable!() };
- update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
- monitor_update.updates.append(&mut additional_monitor_update.updates);
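+ // If we'd sent (or committed) announcement_signatures, reset to NotSent so that
+ // get_announcement_sigs can regenerate and re-send them once we're reconnected.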
+ if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
+ }
+
+ // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
+ // will be retransmitted.
+ self.context.last_sent_closing_fee = None;
+ self.context.pending_counterparty_closing_signed = None;
+ self.context.closing_fee_limits = None;
+
+ let mut inbound_drop_count = 0;
+ self.context.pending_inbound_htlcs.retain(|htlc| {
+ match htlc.state {
+ InboundHTLCState::RemoteAnnounced(_) => {
+ // They sent us an update_add_htlc but we never got the commitment_signed.
+ // We'll tell them what commitment_signed we're expecting next and they'll drop
+ // this HTLC accordingly
+ inbound_drop_count += 1;
+ false
+ },
+ InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
+ // We received a commitment_signed updating this HTLC and (at least hopefully)
+ // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
+ // in response to it yet, so don't touch it.
+ true
+ },
+ InboundHTLCState::Committed => true,
+ InboundHTLCState::LocalRemoved(_) => {
+ // We (hopefully) sent a commitment_signed updating this HTLC (which we can
+ // re-transmit if needed) and they may have even sent a revoke_and_ack back
+ // (that we missed). Keep this around for now and if they tell us they missed
+ // the commitment_signed we can re-transmit the update then.
+ true
+ },
+ }
+ });
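+ // The HTLCs we just dropped were never committed, so the counterparty will re-announce
+ // them with the same ids after reconnecting; roll the expected-next-id counter back.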
+ self.context.next_counterparty_htlc_id -= inbound_drop_count;
+
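+ // Likewise, an update_fee they announced but never committed can simply be forgotten -
+ // they'll re-send it after reconnecting if they still want the new feerate.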
+ if let Some((_, update_state)) = self.context.pending_update_fee {
+ if update_state == FeeUpdateState::RemoteAnnounced {
+ debug_assert!(!self.context.is_outbound());
+ self.context.pending_update_fee = None;
+ }
+ }
+
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
+ // They sent us an update to remove this but haven't yet sent the corresponding
+ // commitment_signed, we need to move it back to Committed and they can re-send
+ // the update upon reconnection.
+ htlc.state = OutboundHTLCState::Committed;
+ }
+ }
+
+ self.context.sent_message_awaiting_response = None;
+
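+ // Flag the channel as disconnected; updates from our peer will be rejected (see e.g.
+ // update_fee below) until the channel_reestablish dance completes.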
+ self.context.channel_state |= ChannelState::PeerDisconnected as u32;
+ log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
+ }
+
+ /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
+ /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+ /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+ /// update completes (potentially immediately).
+ /// The messages which were generated with the monitor update must *not* have been sent to the
+ /// remote end, and must instead have been dropped. They will be regenerated when
+ /// [`Self::monitor_updating_restored`] is called.
+ ///
+ /// [`ChannelManager`]: super::channelmanager::ChannelManager
+ /// [`chain::Watch`]: crate::chain::Watch
+ /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
+ fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+ resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
+ mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
+ mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
+ ) {
+ self.context.monitor_pending_revoke_and_ack |= resend_raa;
+ self.context.monitor_pending_commitment_signed |= resend_commitment;
+ self.context.monitor_pending_channel_ready |= resend_channel_ready;
+ self.context.monitor_pending_forwards.append(&mut pending_forwards);
+ self.context.monitor_pending_failures.append(&mut pending_fails);
+ self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
+ self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
+ }
+
+ /// Indicates that the latest ChannelMonitor update has been committed by the client
+ /// successfully and we should restore normal operation. Returns messages which should be sent
+ /// to the remote side.
+ pub fn monitor_updating_restored<L: Deref, NS: Deref>(
+ &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
+ user_config: &UserConfig, best_block_height: u32
+ ) -> MonitorRestoreUpdates
+ where
+ L::Target: Logger,
+ NS::Target: NodeSigner
+ {
+ assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
+ self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+
+ // If we're past (or at) the FundingSent stage on an outbound channel, try to
+ // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
+ // first received the funding_signed.
+ let mut funding_broadcastable =
+ if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+ self.context.funding_transaction.take()
+ } else { None };
+ // That said, if the funding transaction is already confirmed (ie we're active with a
+ // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
+ if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+ funding_broadcastable = None;
+ }
+
+ // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
+ // (and we assume the user never directly broadcasts the funding transaction and waits for
+ // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
+ // * an inbound channel that failed to persist the monitor on funding_created and we got
+ // the funding transaction confirmed before the monitor was persisted, or
+ // * a 0-conf channel that intended to send the channel_ready before any broadcast at all.
+ let channel_ready = if self.context.monitor_pending_channel_ready {
+ assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
+ "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
+ self.context.monitor_pending_channel_ready = false;
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ })
+ } else { None };
+
+ let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
+
+ let mut accepted_htlcs = Vec::new();
+ mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
+ let mut failed_htlcs = Vec::new();
+ mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
+ let mut finalized_claimed_htlcs = Vec::new();
+ mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
+
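+ // If the peer is still disconnected we have nothing to retransmit right now - the
+ // channel_reestablish exchange will determine which revoke_and_ack and/or
+ // commitment_signed actually need to be re-sent.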
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ return MonitorRestoreUpdates {
+ raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
+ accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ };
+ }
+
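+ // Regenerate the revoke_and_ack and/or commitment_signed we dropped while the monitor
+ // update was in progress.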
+ let raa = if self.context.monitor_pending_revoke_and_ack {
+ Some(self.get_last_revoke_and_ack())
+ } else { None };
+ let commitment_update = if self.context.monitor_pending_commitment_signed {
+ self.mark_awaiting_response();
+ Some(self.get_last_commitment_update(logger))
+ } else { None };
+
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ let order = self.context.resend_order.clone();
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+ if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
+ match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+ MonitorRestoreUpdates {
+ raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ }
+ }
+
+ pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.context.is_outbound() {
+ return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
+ }
+ Channel::<Signer>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
+ let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
+
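+ // Record the fee update as announced-but-uncommitted; like an inbound HTLC, it only
+ // takes effect once it makes it into a commitment transaction.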
+ self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
+ self.context.update_time_counter += 1;
+ // If the feerate has increased over the previous dust buffer (note that
+ // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
+ // won't be pushed over our dust exposure limit by the feerate increase.
+ if feerate_over_dust_buffer {
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+ msg.feerate_per_kw, holder_tx_dust_exposure)));
+ }
+ if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+ msg.feerate_per_kw, counterparty_tx_dust_exposure)));
+ }
+ }
+ Ok(())
+ }
+
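+ /// Rebuilds the last `revoke_and_ack` we sent so it can be retransmitted: re-releases the
+ /// per-commitment secret we previously revoked and provides our next per-commitment point.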
+ fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
+ msgs::RevokeAndACK {
+ channel_id: self.context.channel_id,
+ per_commitment_secret,
+ next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ }
+ }
+
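+ /// Rebuilds the `commitment_update` we last sent, regenerating the update_add/fulfill/fail
+ /// messages from the pending HTLC state, so it can be retransmitted.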
+ fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
+ let mut update_add_htlcs = Vec::new();
+ let mut update_fulfill_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
+
+ for htlc in self.context.pending_outbound_htlcs.iter() {
+ if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
+ update_add_htlcs.push(msgs::UpdateAddHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ amount_msat: htlc.amount_msat,
+ payment_hash: htlc.payment_hash,
+ cltv_expiry: htlc.cltv_expiry,
+ onion_routing_packet: (**onion_packet).clone(),
+ skimmed_fee_msat: htlc.skimmed_fee_msat,
+ });
+ }
+ }
+
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ match reason {
+ &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
+ update_fail_htlcs.push(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ reason: err_packet.clone()
+ });