+ pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
+ // When calculating our exposure to dust HTLCs, we assume that the channel feerate
+ // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
+ // whichever is higher. This ensures that we aren't suddenly exposed to significantly
+ // more dust balance if the feerate increases when we have several HTLCs pending
+ // which are near the dust limit.
+ let mut feerate_per_kw = self.feerate_per_kw;
+ // If there's a pending update fee, use it to ensure we aren't under-estimating
+ // potential feerate updates coming soon.
+ if let Some((feerate, _)) = self.pending_update_fee {
+ feerate_per_kw = cmp::max(feerate_per_kw, feerate);
+ }
+ if let Some(feerate) = outbound_feerate_update {
+ feerate_per_kw = cmp::max(feerate_per_kw, feerate);
+ }
+ cmp::max(2530, feerate_per_kw * 1250 / 1000)
+ }
+
+ /// Get forwarding information for the counterparty.
+ pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
+ self.counterparty_forwarding_info.clone()
+ }
+
+ /// Returns a HTLCStats about inbound pending htlcs
+ fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
+ let context = self;
+ let mut stats = HTLCStats {
+ pending_htlcs: context.pending_inbound_htlcs.len() as u32,
+ pending_htlcs_value_msat: 0,
+ on_counterparty_tx_dust_exposure_msat: 0,
+ on_holder_tx_dust_exposure_msat: 0,
+ holding_cell_msat: 0,
+ on_holder_tx_holding_cell_htlcs_count: 0,
+ };
+
+ let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (0, 0)
+ } else {
+ let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
+ (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
+ dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
+ };
+ let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
+ let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
+ for ref htlc in context.pending_inbound_htlcs.iter() {
+ stats.pending_htlcs_value_msat += htlc.amount_msat;
+ if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
+ stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
+ }
+ if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
+ stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
+ }
+ }
+ stats
+ }
+
/// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
	let context = self;
	let mut stats = HTLCStats {
		pending_htlcs: context.pending_outbound_htlcs.len() as u32,
		pending_htlcs_value_msat: 0,
		on_counterparty_tx_dust_exposure_msat: 0,
		on_holder_tx_dust_exposure_msat: 0,
		holding_cell_msat: 0,
		on_holder_tx_holding_cell_htlcs_count: 0,
	};

	// On anchor channels HTLC transactions are pre-signed at zero fee, so second-stage fees
	// never push an HTLC below the dust limit. Otherwise, pad the dust limits with the fee
	// of the corresponding HTLC transaction at the (buffered) feerate.
	let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		(0, 0)
	} else {
		let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
		(dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
			dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
	};
	// Outbound HTLCs pair with the success limit on the counterparty's commitment tx and the
	// timeout limit on ours (mirror of the inbound case).
	let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
	let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
	for ref htlc in context.pending_outbound_htlcs.iter() {
		stats.pending_htlcs_value_msat += htlc.amount_msat;
		if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
			stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
		}
		if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
			stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
		}
	}

	// Holding-cell adds aren't committed yet but are on track to be, so count them as
	// pending and track their value separately in `holding_cell_msat`.
	for update in context.holding_cell_htlc_updates.iter() {
		if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
			stats.pending_htlcs += 1;
			stats.pending_htlcs_value_msat += amount_msat;
			stats.holding_cell_msat += amount_msat;
			if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
				stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
			}
			if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
				stats.on_holder_tx_dust_exposure_msat += amount_msat;
			} else {
				// Only *non-dust* holding-cell adds are counted here.
				stats.on_holder_tx_holding_cell_htlcs_count += 1;
			}
		}
	}
	stats
}
+
+ /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
+ /// Doesn't bother handling the
+ /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
+ /// corner case properly.
+ pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+ -> AvailableBalances
+ where F::Target: FeeEstimator
+ {
+ let context = &self;
+ // Note that we have to handle overflow due to the above case.
+ let inbound_stats = context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = context.get_outbound_pending_htlc_stats(None);
+
+ let outbound_capacity_msat = context.value_to_self_msat
+ .saturating_sub(outbound_stats.pending_htlcs_value_msat)
+ .saturating_sub(
+ context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
+
+ let mut available_capacity_msat = outbound_capacity_msat;
+
+ if context.is_outbound() {
+ // We should mind channel commit tx fee when computing how much of the available capacity
+ // can be used in the next htlc. Mirrors the logic in send_htlc.
+ //
+ // The fee depends on whether the amount we will be sending is above dust or not,
+ // and the answer will in turn change the amount itself — making it a circular
+ // dependency.
+ // This complicates the computation around dust-values, up to the one-htlc-value.
+ let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
+ if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
+ }
+
+ let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
+ let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
+ let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
+ let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
+
+ // We will first subtract the fee as if we were above-dust. Then, if the resulting
+ // value ends up being below dust, we have this fee available again. In that case,
+ // match the value to right-below-dust.
+ let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
+ if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
+ let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
+ debug_assert!(one_htlc_difference_msat != 0);
+ capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
+ capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
+ available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
+ } else {
+ available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
+ }
+ } else {
+ // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
+ // sending a new HTLC won't reduce their balance below our reserve threshold.
+ let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
+ if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
+ }
+
+ let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
+ let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
+
+ let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
+ let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
+ .saturating_sub(inbound_stats.pending_htlcs_value_msat);
+
+ if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
+ // If another HTLC's fee would reduce the remote's balance below the reserve limit
+ // we've selected for them, we can only send dust HTLCs.
+ available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
+ }
+ }
+
+ let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
+
+ // If we get close to our maximum dust exposure, we end up in a situation where we can send
+ // between zero and the remaining dust exposure limit remaining OR above the dust limit.
+ // Because we cannot express this as a simple min/max, we prefer to tell the user they can
+ // send above the dust limit (as the router can always overpay to meet the dust limit).
+ let mut remaining_msat_below_dust_exposure_limit = None;
+ let mut dust_exposure_dust_limit_msat = 0;
+ let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
+
+ let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
+ } else {
+ let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
+ (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
+ context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
+ };
+ let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
+ remaining_msat_below_dust_exposure_limit =
+ Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+ dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
+ }
+
+ let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
+ remaining_msat_below_dust_exposure_limit = Some(cmp::min(
+ remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
+ max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
+ dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
+ }
+
+ if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
+ if available_capacity_msat < dust_exposure_dust_limit_msat {
+ available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
+ } else {
+ next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
+ }
+ }
+
+ available_capacity_msat = cmp::min(available_capacity_msat,
+ context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
+
+ if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
+ available_capacity_msat = 0;
+ }
+
+ AvailableBalances {
+ inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
+ - context.value_to_self_msat as i64
+ - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
+ - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
+ 0) as u64,
+ outbound_capacity_msat,
+ next_outbound_htlc_limit_msat: available_capacity_msat,
+ next_outbound_htlc_minimum_msat,
+ }
+ }
+
+ pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
+ let context = &self;
+ (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
+ }
+
/// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
/// number of pending HTLCs that are on track to be in our next commitment tx.
///
/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
/// `fee_spike_buffer_htlc` is `Some`.
///
/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
///
/// Dust HTLCs are excluded.
fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
	let context = &self;
	// Only meaningful on channels we funded — only the funder pays commitment tx fees.
	assert!(context.is_outbound());

	// On anchor channels HTLC transactions are pre-signed at zero fee, so second-stage fees
	// don't affect the dust threshold; otherwise pad the dust limits with the corresponding
	// HTLC transaction fee at the current feerate.
	let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		(0, 0)
	} else {
		(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
	};
	// On our commitment tx, inbound HTLCs use the success path and outbound use the timeout
	// path, hence the two distinct effective dust limits below.
	let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
	let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;

	// Extra HTLC outputs on top of those already pending: the candidate (if non-dust) plus,
	// optionally, one more as a fee-spike buffer.
	let mut addl_htlcs = 0;
	if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
	match htlc.origin {
		HTLCInitiator::LocalOffered => {
			if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
				addl_htlcs += 1;
			}
		},
		HTLCInitiator::RemoteOffered => {
			if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
				addl_htlcs += 1;
			}
		}
	}

	// Count the non-dust HTLCs on track for our next commitment tx.
	let mut included_htlcs = 0;
	for ref htlc in context.pending_inbound_htlcs.iter() {
		if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
			continue
		}
		// We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
		// transaction including this HTLC if it times out before they RAA.
		included_htlcs += 1;
	}

	for ref htlc in context.pending_outbound_htlcs.iter() {
		if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
			continue
		}
		match htlc.state {
			OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
			OutboundHTLCState::Committed => included_htlcs += 1,
			OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
			// We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
			// transaction won't be generated until they send us their next RAA, which will mean
			// dropping any HTLCs in this state.
			_ => {},
		}
	}

	// Holding-cell adds will be freed into our next commitment tx, so count the non-dust ones.
	for htlc in context.holding_cell_htlc_updates.iter() {
		match htlc {
			&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
				if amount_msat / 1000 < real_dust_limit_timeout_sat {
					continue
				}
				included_htlcs += 1
			},
			_ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
			// ack we're guaranteed to never include them in commitment txs anymore.
		}
	}

	let num_htlcs = included_htlcs + addl_htlcs;
	let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
	// In test/fuzzing builds, cache the projected fee (sans the fee-spike-buffer HTLC) so
	// later commitment construction can cross-check this prediction.
	#[cfg(any(test, fuzzing))]
	{
		let mut fee = res;
		if fee_spike_buffer_htlc.is_some() {
			fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
		}
		let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
			+ context.holding_cell_htlc_updates.len();
		let commitment_tx_info = CommitmentTxInfoCached {
			fee,
			total_pending_htlcs,
			next_holder_htlc_id: match htlc.origin {
				HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
				HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
			},
			next_counterparty_htlc_id: match htlc.origin {
				HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
				HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
			},
			feerate: context.feerate_per_kw,
		};
		*context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
	}
	res
}
+
/// Get the commitment tx fee for the remote's next commitment transaction based on the number of
/// pending HTLCs that are on track to be in their next commitment tx
///
/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
/// `fee_spike_buffer_htlc` is `Some`.
///
/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
///
/// Dust HTLCs are excluded.
fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
	let context = &self;
	// Only meaningful on channels the counterparty funded — the funder pays commitment fees.
	assert!(!context.is_outbound());

	// On anchor channels HTLC transactions are pre-signed at zero fee, so second-stage fees
	// don't affect the dust threshold; otherwise pad the dust limits with the corresponding
	// HTLC transaction fee at the current feerate.
	let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		(0, 0)
	} else {
		(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
	};
	// On *their* commitment tx the roles are mirrored relative to ours, and their dust limit
	// applies — hence counterparty_dust_limit_satoshis on both.
	let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
	let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;

	// Extra HTLC outputs on top of those already pending: the candidate (if non-dust on their
	// tx) plus, optionally, one more as a fee-spike buffer.
	let mut addl_htlcs = 0;
	if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
	match htlc.origin {
		HTLCInitiator::LocalOffered => {
			if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
				addl_htlcs += 1;
			}
		},
		HTLCInitiator::RemoteOffered => {
			if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
				addl_htlcs += 1;
			}
		}
	}

	// When calculating the set of HTLCs which will be included in their next commitment_signed, all
	// non-dust inbound HTLCs are included (as all states imply it will be included) and only
	// committed outbound HTLCs, see below.
	let mut included_htlcs = 0;
	for ref htlc in context.pending_inbound_htlcs.iter() {
		// NB: `<=` here (vs `<` in next_local_commit_tx_fee_msat) — this intentionally errs
		// on the conservative side when the amount exactly equals the dust limit.
		if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
			continue
		}
		included_htlcs += 1;
	}

	for ref htlc in context.pending_outbound_htlcs.iter() {
		if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
			continue
		}
		// We only include outbound HTLCs if it will not be included in their next commitment_signed,
		// i.e. if they've responded to us with an RAA after announcement.
		match htlc.state {
			OutboundHTLCState::Committed => included_htlcs += 1,
			OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
			OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
			_ => {},
		}
	}

	let num_htlcs = included_htlcs + addl_htlcs;
	let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
	// In test/fuzzing builds, cache the projected fee (sans the fee-spike-buffer HTLC) so
	// later commitment construction can cross-check this prediction.
	#[cfg(any(test, fuzzing))]
	{
		let mut fee = res;
		if fee_spike_buffer_htlc.is_some() {
			fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
		}
		let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
		let commitment_tx_info = CommitmentTxInfoCached {
			fee,
			total_pending_htlcs,
			next_holder_htlc_id: match htlc.origin {
				HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
				HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
			},
			next_counterparty_htlc_id: match htlc.origin {
				HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
				HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
			},
			feerate: context.feerate_per_kw,
		};
		*context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
	}
	res
}
+
+ /// Returns transaction if there is pending funding transaction that is yet to broadcast
+ pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
+ if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
+ self.funding_transaction.clone()
+ } else {
+ None
+ }
+ }
+
/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
/// shutdown of this channel - no more calls into this Channel may be made afterwards except
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
///
/// The returned `ShutdownResult` is the (optional monitor update, dropped outbound HTLCs)
/// pair produced below.
pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
	// Note that we MUST only generate a monitor update that indicates force-closure - we're
	// called during initialization prior to the chain_monitor in the encompassing ChannelManager
	// being fully configured in some cases. Thus, its likely any monitor events we generate will
	// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
	assert!(self.channel_state != ChannelState::ShutdownComplete as u32);

	// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
	// return them to fail the payment.
	let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
	let counterparty_node_id = self.get_counterparty_node_id();
	for htlc_update in self.holding_cell_htlc_updates.drain(..) {
		match htlc_update {
			HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
				dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
			},
			_ => {}
		}
	}
	let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
		// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
		// returning a channel monitor update here would imply a channel monitor update before
		// we even registered the channel monitor to begin with, which is invalid.
		// Thus, if we aren't actually at a point where we could conceivably broadcast the
		// funding transaction, don't return a funding txo (which prevents providing the
		// monitor update to the user, even if we return one).
		// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
		if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
			// Mark the monitor as terminally closed via the special closed-channel update id.
			self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
			Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
				update_id: self.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
			}))
		} else { None }
	} else { None };

	// Transition to the terminal state and bump the update counter so any later
	// channel_update we (can't) generate would supersede prior ones.
	self.channel_state = ChannelState::ShutdownComplete as u32;
	self.update_time_counter += 1;
	(monitor_update, dropped_outbound_htlcs)
}
+}
+
+// Internal utility functions for channels
+
+/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
+/// `channel_value_satoshis` in msat, set through
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
+///
+/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
+///
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
+fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
+ let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
+ 1
+ } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
+ 100
+ } else {
+ config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
+ };
+ channel_value_satoshis * 10 * configured_percent
+}
+
+/// Returns a minimum channel reserve value the remote needs to maintain,
+/// required by us according to the configured or default
+/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
+///
+/// Guaranteed to return a value no larger than channel_value_satoshis
+///
+/// This is used both for outbound and inbound channels and has lower bound
+/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
+pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
+ let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
+ cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
+}
+
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// 1% of the channel value, floored at 1000 sats, capped at the full channel value.
	// (Unsigned division can never overflow, so a plain `/` suffices.)
	let one_percent_sat = channel_value_satoshis / 100;
	cmp::min(channel_value_satoshis, cmp::max(one_percent_sat, 1000))
}
+
+// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
+// Note that num_htlcs should not include dust HTLCs.
+#[inline]
+fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
+ feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
+}
+
+// Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
+// Note that num_htlcs should not include dust HTLCs.
+fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
+ // Note that we need to divide before multiplying to round properly,
+ // since the lowest denomination of bitcoin on-chain is the satoshi.
+ (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
+}
+
+// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
+// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
+// calling channel_id() before we're set up or things like get_funding_signed on an
+// inbound channel.
+//
+// Holder designates channel data owned for the benefit of the user client.
+// Counterparty designates channel data owned by the another channel participant entity.
// A fully-set-up channel, wrapping the shared state in [`ChannelContext`] (see the TODO above
// re: splitting setup-time handshaking into separate Inbound/Outbound types).
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
	// All channel state; public so callers in this module tree can reach the context directly.
	pub context: ChannelContext<SP>,
}
+
// Cached projection of the next commitment transaction's fee, written by
// `next_{local,remote}_commit_tx_fee_msat` in test/fuzzing builds so later commitment
// construction can cross-check the prediction.
#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
	// Projected commitment tx fee in msat (excluding any fee-spike-buffer HTLC).
	fee: u64,
	// Count of all pending HTLCs considered when the projection was made.
	total_pending_htlcs: usize,
	// Expected next holder HTLC id after the candidate HTLC is applied.
	next_holder_htlc_id: u64,
	// Expected next counterparty HTLC id after the candidate HTLC is applied.
	next_counterparty_htlc_id: u64,
	// Feerate (sat/kW) the projection was computed at.
	feerate: u32,
}
+
+impl<SP: Deref> Channel<SP> where
+ SP::Target: SignerProvider,
+ <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
+{
+ fn check_remote_fee<F: Deref, L: Deref>(
+ channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
+ feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
+ ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
+ {
+ // We only bound the fee updates on the upper side to prevent completely absurd feerates,
+ // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
+ // We generally don't care too much if they set the feerate to something very high, but it
+ // could result in the channel being useless due to everything being dust. This doesn't
+ // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
+ // zero fee, so their fee is no longer considered to determine dust limits.
+ if !channel_type.supports_anchors_zero_fee_htlc_tx() {
+ let upper_limit = cmp::max(250 * 25,
+ fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
+ if feerate_per_kw as u64 > upper_limit {
+ return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
+ }
+ }
+
+ // We can afford to use a lower bound with anchors than previously since we can now bump
+ // fees when broadcasting our commitment. However, we must still make sure we meet the
+ // minimum mempool feerate, until package relay is deployed, such that we can ensure the
+ // commitment transaction propagates throughout node mempools on its own.
+ let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
+ ConfirmationTarget::MempoolMinimum
+ } else {
+ ConfirmationTarget::Background
+ };
+ let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
+ // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
+ // occasional issues with feerate disagreements between an initiator that wants a feerate
+ // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
+ // sat/kw before the comparison here.
+ if feerate_per_kw + 250 < lower_limit {
+ if let Some(cur_feerate) = cur_feerate_per_kw {
+ if feerate_per_kw > cur_feerate {
+ log_warn!(logger,
+ "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
+ cur_feerate, feerate_per_kw);
+ return Ok(());
+ }
+ }
+ return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
+ }
+ Ok(())
+ }
+
+ #[inline]
+ fn get_closing_scriptpubkey(&self) -> Script {
+ // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
+ // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
+ // outside of those situations will fail.
+ self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+ }
+
+ #[inline]
+ fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
+ let mut ret =
+ (4 + // version
+ 1 + // input count
+ 36 + // prevout
+ 1 + // script length (0)
+ 4 + // sequence
+ 1 + // output count
+ 4 // lock time
+ )*4 + // * 4 for non-witness parts
+ 2 + // witness marker and flag
+ 1 + // witness element count
+ 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
+ self.context.get_funding_redeemscript().len() as u64 + // funding witness script
+ 2*(1 + 71); // two signatures + sighash type flags
+ if let Some(spk) = a_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ if let Some(spk) = b_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ ret
+ }
+
// Builds the cooperative closing transaction for the given proposed total fee, returning it
// along with the fee actually paid (which may exceed the proposal if a balance went negative).
#[inline]
fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
	// Cooperative close is only valid once all HTLCs are settled and no fee update is pending.
	assert!(self.context.pending_inbound_htlcs.is_empty());
	assert!(self.context.pending_outbound_htlcs.is_empty());
	assert!(self.context.pending_update_fee.is_none());

	let mut total_fee_satoshis = proposed_total_fee_satoshis;
	// The channel funder (the outbound side) pays the closing fee out of their balance.
	let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
	let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

	// If the fee exceeds the funder's balance their output goes negative; cap the fee at
	// their full balance instead (the asserts confirm only the funder can go negative).
	if value_to_holder < 0 {
		assert!(self.context.is_outbound());
		total_fee_satoshis += (-value_to_holder) as u64;
	} else if value_to_counterparty < 0 {
		assert!(!self.context.is_outbound());
		total_fee_satoshis += (-value_to_counterparty) as u64;
	}

	// Prune outputs at or below our dust limit (or when the caller asked to skip the remote
	// output); their value implicitly goes to fees.
	if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
		value_to_counterparty = 0;
	}

	if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
		value_to_holder = 0;
	}

	// Both shutdown scripts must be known by the time we build a closing tx.
	assert!(self.context.shutdown_scriptpubkey.is_some());
	let holder_shutdown_script = self.get_closing_scriptpubkey();
	let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
	let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

	let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
	(closing_transaction, total_fee_satoshis)
}
+
+ fn funding_outpoint(&self) -> OutPoint {
+ self.context.channel_transaction_parameters.funding_outpoint.unwrap()
+ }
+
/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
/// entirely.
///
/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
///
/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
/// disconnected).
pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
	(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
where L::Target: Logger {
	// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
	// (see equivalent if condition there).
	assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
	let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
	let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
	// Roll the update id back to its pre-call value so the dropped update is never accounted for.
	self.context.latest_monitor_update_id = mon_update_id;
	if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
		assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
	}
}
+
+ fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+ // Either ChannelReady got set (which means it won't be unset) or there is no way any
+ // caller thought we could have something claimed (cause we wouldn't have accepted in an
+ // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
+ // either.
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from