cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
}
- pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
+ pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
+ _fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
+ where F::Target: FeeEstimator
+ {
match self.config.options.max_dust_htlc_exposure {
MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
MaxDustHTLCExposure::FeeRateMultiplier(_) => 5_000_000,
}
}
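Note that in this commit the estimator is still unused (hence the `_fee_estimator` binding) and the `FeeRateMultiplier` arm returns a placeholder constant; a follow-up commit replaces it with a feerate-scaled limit. On the user side the new knob is the config enum; a minimal sketch, assuming the `lightning::util::config` paths introduced alongside this change:

```rust
use lightning::util::config::{MaxDustHTLCExposure, UserConfig};

// Sketch: opt a channel into the feerate-scaled dust exposure limit.
// Unlike FixedLimitMsat, the effective cap moves with the fee estimator's
// reported feerate once the follow-up commit wires it through.
let mut config = UserConfig::default();
config.channel_config.max_dust_htlc_exposure =
    MaxDustHTLCExposure::FeeRateMultiplier(5_000);
```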
/// Doesn't bother handling the
/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
/// corner case properly.
- pub fn get_available_balances(&self) -> AvailableBalances {
+ pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+ -> AvailableBalances
+ where F::Target: FeeEstimator
+ {
let context = &self;
// Note that we have to handle overflow due to the above case.
let inbound_stats = context.get_inbound_pending_htlc_stats(None);
// send above the dust limit (as the router can always overpay to meet the dust limit).
let mut remaining_msat_below_dust_exposure_limit = None;
let mut dust_exposure_dust_limit_msat = 0;
+ let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
} else {
let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
(context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
};
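For non-anchor channels the effective dust threshold of an HTLC is the commitment dust limit plus the fee on its claim transaction at the buffered feerate. A toy calculation with illustrative values, using the pre-anchors BOLT 3 HTLC-timeout weight of 663 WU:

```rust
// Toy numbers: effective dust threshold = dust limit + claim-tx fee.
let dust_buffer_feerate: u64 = 2_500;   // sat/kW, assumed
let htlc_timeout_tx_weight: u64 = 663;  // BOLT 3 weight, non-anchor channels
let holder_dust_limit_satoshis: u64 = 354;
let threshold_sats =
    holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight / 1000;
assert_eq!(threshold_sats, 2_011); // any HTLC below this is dust on our tx
```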
let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+ if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
remaining_msat_below_dust_exposure_limit =
- Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+ Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
}
let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
- if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+ if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
remaining_msat_below_dust_exposure_limit = Some(cmp::min(
remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
- context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+ max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
}
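The headroom bookkeeping above is plain saturating arithmetic: once current exposure is at or over the limit, the remaining budget for new dust HTLCs clamps to zero instead of underflowing. A minimal illustration with made-up values:

```rust
// Made-up values illustrating the remaining-headroom computation.
let max_dust_htlc_exposure_msat: u64 = 5_000_000;
let on_counterparty_dust_htlc_exposure_msat: u64 = 4_750_000;

// saturating_sub keeps the budget at 0 when we are already over the limit,
// rather than panicking or wrapping on underflow.
let remaining = max_dust_htlc_exposure_msat
    .saturating_sub(on_counterparty_dust_htlc_exposure_msat);
assert_eq!(remaining, 250_000);
assert_eq!(100u64.saturating_sub(200), 0);
```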
Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
}
- pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
- where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
+ pub fn update_add_htlc<F, FE: Deref, L: Deref>(
+ &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
+ create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
+ ) -> Result<(), ChannelError>
+ where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
+ FE::Target: FeeEstimator, L::Target: Logger,
+ {
// We can't accept HTLCs sent after we've sent a shutdown.
let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
if local_sent_shutdown {
}
}
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
(0, 0)
} else {
let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
};
let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
- if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+ if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+ on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
}
let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
- if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+ if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
- on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+ on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
}
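In both branches the offending HTLC is failed back with `0x1000|7` rather than closing the channel; per BOLT 4 that is `temporary_channel_failure` (the UPDATE flag plus code 7), which tells the origin node to route around us:

```rust
// BOLT 4 failure code used above: UPDATE flag | 7 = temporary_channel_failure.
const UPDATE: u16 = 0x1000;
let failure_code: u16 = UPDATE | 7;
assert_eq!(failure_code, 0x1007);
```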
/// Public version of the below, checking relevant preconditions first.
/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
/// returns `(None, Vec::new())`.
- pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
- self.free_holding_cell_htlcs(logger)
+ self.free_holding_cell_htlcs(fee_estimator, logger)
} else { (None, Vec::new()) }
}
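All of these signatures thread a `&LowerBoundedFeeEstimator<F>` where `F::Target: FeeEstimator`; the wrapper is LDK-internal and clamps the user's estimate to the feerate floor (253 sat/kW). What the user supplies is just the `FeeEstimator` trait; a minimal sketch, with the `StaticFeeEstimator` name being hypothetical:

```rust
use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};

// Hypothetical estimator returning a constant feerate; a real node would
// query bitcoind or a fee-estimation service per confirmation target.
struct StaticFeeEstimator;

impl FeeEstimator for StaticFeeEstimator {
    fn get_est_sat_per_1000_weight(&self, _target: ConfirmationTarget) -> u32 {
        1_000 // sat per 1000 weight; LDK floors this internally at 253
    }
}
```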
/// Frees any pending commitment updates in the holding cell, generating the relevant messages
/// for our counterparty.
- fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
skimmed_fee_msat, ..
} => {
match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
- onion_routing_packet.clone(), false, skimmed_fee_msat, logger)
+ onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
{
Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
Err(e) => {
return (None, htlcs_to_fail);
}
let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
- self.send_update_fee(feerate, false, logger)
+ self.send_update_fee(feerate, false, fee_estimator, logger)
} else {
None
};
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
- where L::Target: Logger,
+ pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger,
{
if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
}
- match self.free_holding_cell_htlcs(logger) {
+ match self.free_holding_cell_htlcs(fee_estimator, logger) {
(Some(mut additional_update), htlcs_to_fail) => {
// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
// strictly increasing by one, so decrement it here.
/// Queues up an outbound update fee by placing it in the holding cell. You should call
/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
/// commitment update.
- pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
- let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
+ pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
assert!(msg_opt.is_none(), "We forced holding cell?");
}
///
/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
/// [`Channel`] if `force_holding_cell` is false.
- fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
+ fn send_update_fee<F: Deref, L: Deref>(
+ &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Option<msgs::UpdateFee>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
if !self.context.is_outbound() {
panic!("Cannot send fee from inbound channel");
}
// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
return None;
}
- if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
return None;
}
let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
msg.feerate_per_kw, holder_tx_dust_exposure)));
}
- if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
msg.feerate_per_kw, counterparty_tx_dust_exposure)));
}
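These checks re-run on every `update_fee` because a higher feerate raises the trimmed-to-dust threshold: an HTLC that was economically claimable at the old feerate can become dust at the new one, silently inflating our exposure. A toy illustration with one 3,000 sat HTLC (all values assumed):

```rust
// A feerate bump can reclassify an existing HTLC as dust.
let dust_limit_sats: u64 = 354;
let htlc_timeout_weight: u64 = 663; // BOLT 3 weight, non-anchor channels
let htlc_sats: u64 = 3_000;
let threshold =
    |feerate_per_kw: u64| dust_limit_sats + feerate_per_kw * htlc_timeout_weight / 1000;

// Above the dust threshold at 2,500 sat/kW (threshold = 2_011 sats)...
assert!(htlc_sats > threshold(2_500));
// ...but trimmed to dust at 5,000 sat/kW (threshold = 3_669 sats).
assert!(htlc_sats < threshold(5_000));
```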
/// commitment update.
///
/// `Err`s will only be [`ChannelError::Ignore`].
- pub fn queue_add_htlc<L: Deref>(
+ pub fn queue_add_htlc<F: Deref, L: Deref>(
&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
- onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>, logger: &L
- ) -> Result<(), ChannelError> where L::Target: Logger {
+ onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
self
.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
- skimmed_fee_msat, logger)
+ skimmed_fee_msat, fee_estimator, logger)
.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
.map_err(|err| {
if let ChannelError::Ignore(_) = err { /* fine */ }
/// on this [`Channel`] if `force_holding_cell` is false.
///
/// `Err`s will only be [`ChannelError::Ignore`].
- fn send_htlc<L: Deref>(
+ fn send_htlc<F: Deref, L: Deref>(
&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
- skimmed_fee_msat: Option<u64>, logger: &L
- ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
+ skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
}
if amount_msat == 0 {
return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
}
- let available_balances = self.context.get_available_balances();
+ let available_balances = self.context.get_available_balances(fee_estimator);
if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
available_balances.next_outbound_htlc_minimum_msat)));
///
/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
- pub fn send_htlc_and_commit<L: Deref>(
- &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
- onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>, logger: &L
- ) -> Result<Option<ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+ pub fn send_htlc_and_commit<F: Deref, L: Deref>(
+ &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
+ source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
- onion_routing_packet, false, skimmed_fee_msat, logger);
+ onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
match send_res? {
Some(_) => {
self.short_channel_id.or(self.outbound_scid_alias)
}
- fn from_channel_context<Signer: WriteableEcdsaChannelSigner>(context: &ChannelContext<Signer>,
- best_block_height: u32, latest_features: InitFeatures) -> Self {
-
- let balance = context.get_available_balances();
+ fn from_channel_context<Signer: WriteableEcdsaChannelSigner, F: Deref>(
+ context: &ChannelContext<Signer>, best_block_height: u32, latest_features: InitFeatures,
+ fee_estimator: &LowerBoundedFeeEstimator<F>
+ ) -> Self
+ where F::Target: FeeEstimator
+ {
+
+ let balance = context.get_available_balances(fee_estimator);
let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
context.get_holder_counterparty_selected_channel_reserve_satoshis();
ChannelDetails {
let peer_state = &mut *peer_state_lock;
for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone());
+ peer_state.latest_features.clone(), &self.fee_estimator);
res.push(details);
}
}
let peer_state = &mut *peer_state_lock;
for (_channel_id, channel) in peer_state.channel_by_id.iter() {
let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone());
+ peer_state.latest_features.clone(), &self.fee_estimator);
res.push(details);
}
for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone());
+ peer_state.latest_features.clone(), &self.fee_estimator);
res.push(details);
}
for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone());
+ peer_state.latest_features.clone(), &self.fee_estimator);
res.push(details);
}
}
return peer_state.channel_by_id
.iter()
.map(|(_, channel)|
- ChannelDetails::from_channel_context(&channel.context, best_block_height, features.clone()))
+ ChannelDetails::from_channel_context(&channel.context, best_block_height,
+ features.clone(), &self.fee_estimator))
.collect();
}
vec![]
session_priv: session_priv.clone(),
first_hop_htlc_msat: htlc_msat,
payment_id,
- }, onion_packet, None, &self.logger);
+ }, onion_packet, None, &self.fee_estimator, &self.logger);
match break_chan_entry!(self, send_res, chan) {
Some(monitor_update) => {
match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
});
if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
payment_hash, outgoing_cltv_value, htlc_source.clone(),
- onion_packet, skimmed_fee_msat, &self.logger)
+ onion_packet, skimmed_fee_msat, &self.fee_estimator,
+ &self.logger)
{
if let ChannelError::Ignore(msg) = e {
log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
log_bytes!(chan_id[..]), chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
- chan.queue_update_fee(new_feerate, &self.logger);
+ chan.queue_update_fee(new_feerate, &self.fee_estimator, &self.logger);
NotifyOption::DoPersist
}
_ => pending_forward_info
}
};
- try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+ try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan);
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let funding_txo = chan.get().context.get_funding_txo();
- let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+ let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan);
let res = if let Some(monitor_update) = monitor_update_opt {
handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
let counterparty_node_id = chan.context.get_counterparty_node_id();
let funding_txo = chan.context.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) =
- chan.maybe_free_holding_cell_htlcs(&self.logger);
+ chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &self.logger);
if !holding_cell_failed_htlcs.is_empty() {
failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
}