use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
-use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
+use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;
use crate::io;
payment_hash: PaymentHash,
state: OutboundHTLCState,
source: HTLCSource,
+ skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
payment_hash: PaymentHash,
source: HTLCSource,
onion_routing_packet: msgs::OnionPacket,
+ // The extra fee we're skimming off the top of this HTLC.
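+ // (E.g., as an LSP we may retain part of a forwarded HTLC's value, presumably as
+ // compensation for opening a just-in-time channel to the payee.)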
+ skimmed_fee_msat: Option<u64>,
},
ClaimHTLC {
payment_preimage: PaymentPreimage,
}
/// The return type of `get_update_fulfill_htlc_and_commit`.
-pub enum UpdateFulfillCommitFetch<'a> {
+pub enum UpdateFulfillCommitFetch {
/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
/// previously placed in the holding cell (and has since been removed).
NewClaim {
/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
- monitor_update: &'a ChannelMonitorUpdate,
+ monitor_update: ChannelMonitorUpdate,
/// The value of the HTLC which was claimed, in msat.
htlc_value_msat: u64,
},
}
/// The return type of `force_shutdown`
+///
+/// Contains an optional (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple,
+/// followed by the list of HTLCs to fail back, each given as a
+/// (source, payment hash, counterparty_node_id, channel_id) tuple.
pub(crate) type ShutdownResult = (
Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
struct PendingChannelMonitorUpdate {
update: ChannelMonitorUpdate,
- /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
- /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
- /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
- ///
- /// [`ChannelManager`]: super::channelmanager::ChannelManager
- blocked: bool,
}
impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
(0, update, required),
- (2, blocked, required),
});
/// Contains everything about the channel, including its state and various flags.
/// [`SignerProvider::derive_channel_signer`].
channel_keys_id: [u8; 32],
- /// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
- /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
- /// completes we still need to be able to complete the persistence. Thus, we have to keep a
- /// copy of the [`ChannelMonitorUpdate`] here until it is complete.
- pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
+ /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
+ /// store it here and only release it to the `ChannelManager` once it asks for it.
+ blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<Signer: ChannelSigner> ChannelContext<Signer> {
(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
}
+ /// Returns which stage of the shutdown process this channel is in, as a [`ChannelShutdownState`].
+ pub fn shutdown_state(&self) -> ChannelShutdownState {
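+ // Check the most terminal state first, then one-sided shutdown, then split the
+ // both-sides-shutdown case on whether closing negotiation can begin.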
+ if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
+ return ChannelShutdownState::ShutdownComplete;
+ }
+ if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
+ return ChannelShutdownState::ShutdownInitiated;
+ }
+ if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
+ return ChannelShutdownState::ResolvingHTLCs;
+ }
+ if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
+ return ChannelShutdownState::NegotiatingClosingFee;
+ }
+ ChannelShutdownState::NotShuttingDown
+ }
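+
+ // A rough usage sketch of `shutdown_state` (illustrative only; `context` is a
+ // hypothetical binding to a `ChannelContext`):
+ //
+ //     match context.shutdown_state() {
+ //         ChannelShutdownState::NotShuttingDown => { /* channel fully operational */ }
+ //         ChannelShutdownState::ShutdownInitiated => { /* we sent `shutdown`, peer hasn't */ }
+ //         ChannelShutdownState::ResolvingHTLCs => { /* both sent `shutdown`, HTLCs pending */ }
+ //         ChannelShutdownState::NegotiatingClosingFee => { /* exchanging `closing_signed` */ }
+ //         ChannelShutdownState::ShutdownComplete => { /* nothing left to do */ }
+ //     }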
+
+ fn closing_negotiation_ready(&self) -> bool {
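+ // Negotiation is ready only once all HTLCs and any pending fee update have
+ // cleared, and `channel_state`, masked to the bits below, equals exactly
+ // BOTH_SIDES_SHUTDOWN_MASK: both shutdown bits set, with none of the blocking
+ // AwaitingRemoteRevoke/PeerDisconnected/MonitorUpdateInProgress bits.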
+ self.pending_inbound_htlcs.is_empty() &&
+ self.pending_outbound_htlcs.is_empty() &&
+ self.pending_update_fee.is_none() &&
+ self.channel_state &
+ (BOTH_SIDES_SHUTDOWN_MASK |
+ ChannelState::AwaitingRemoteRevoke as u32 |
+ ChannelState::PeerDisconnected as u32 |
+ ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+ }
+
/// Returns true if this channel is currently available for use. This is a superset of
/// is_usable() and considers things like the channel being temporarily disabled.
/// Allowed in any state (including after shutdown)
cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
}
- pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
- self.config.options.max_dust_htlc_exposure_msat
+ pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
+ fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
+ where F::Target: FeeEstimator
+ {
+ match self.config.options.max_dust_htlc_exposure {
+ MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
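+ // E.g. (illustrative numbers, not defaults): a HighPriority estimate of
+ // 5_000 sat/kW with a multiplier of 5_000 caps dust exposure at
+ // 5_000 * 5_000 = 25_000_000 msat.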
+ let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
+ ConfirmationTarget::HighPriority);
+ feerate_per_kw as u64 * multiplier
+ },
+ MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
+ }
}
/// Returns the previous [`ChannelConfig`] applied to this channel, if any.
/// Doesn't bother handling the
/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
/// corner case properly.
- pub fn get_available_balances(&self) -> AvailableBalances {
+ pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+ -> AvailableBalances
+ where F::Target: FeeEstimator
+ {
let context = &self;
// Note that we have to handle overflow due to the above case.
let inbound_stats = context.get_inbound_pending_htlc_stats(None);
// send above the dust limit (as the router can always overpay to meet the dust limit).
let mut remaining_msat_below_dust_exposure_limit = None;
let mut dust_exposure_dust_limit_msat = 0;
+ let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
};
let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+ if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
remaining_msat_below_dust_exposure_limit =
- Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+ Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
}
let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
- if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+ if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
remaining_msat_below_dust_exposure_limit = Some(cmp::min(
remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
- context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+ max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
}
}
pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
- let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+ let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
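+ // The commitment-signed monitor update may only fly immediately if no other
+ // updates are currently blocked waiting on external events.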
match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
// Even if we aren't supposed to let new monitor updates with commitment state
// updates run, we still need to push the preimage ones no matter what. Sadly, to
// push a new monitor update which flies before others
// already queued, we have to insert it into the pending queue and update the
// update_ids of all the following monitors.
- let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+ if release_cs_monitor && msg.is_some() {
let mut additional_update = self.build_commitment_no_status_check(logger);
// build_commitment_no_status_check may bump latest_monitor_id but we want them
// to be strictly increasing by one, so decrement it here.
self.context.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
- self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
- update: monitor_update, blocked: false,
- });
- self.context.pending_monitor_updates.len() - 1
} else {
- let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
- .unwrap_or(self.context.pending_monitor_updates.len());
- let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
+ let new_mon_id = self.context.blocked_monitor_updates.get(0)
.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
monitor_update.update_id = new_mon_id;
- self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
- update: monitor_update, blocked: false,
- });
- for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
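+ // Since our preimage-bearing update jumped ahead of the blocked queue, bump
+ // each blocked update's ID by one to keep update_ids strictly increasing.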
+ for held_update in self.context.blocked_monitor_updates.iter_mut() {
held_update.update.update_id += 1;
}
if msg.is_some() {
debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
let update = self.build_commitment_no_status_check(logger);
- self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
- update, blocked: true,
+ self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+ update,
});
}
- insert_pos
- };
- self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
- UpdateFulfillCommitFetch::NewClaim {
- monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
- .expect("We just pushed the monitor update").update,
- htlc_value_msat,
}
+
+ self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+ UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
},
UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
}
Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
}
- pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
- where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
+ pub fn update_add_htlc<F, FE: Deref, L: Deref>(
+ &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
+ create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
+ ) -> Result<(), ChannelError>
+ where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
+ FE::Target: FeeEstimator, L::Target: Logger,
+ {
// We can't accept HTLCs sent after we've sent a shutdown.
let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
if local_sent_shutdown {
}
}
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
(0, 0)
} else {
let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
- if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+ if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+ on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
}
let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
- if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+ if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
- on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+ on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
}
Ok(())
}
- pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
where L::Target: Logger
{
if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
/// Public version of the below, checking relevant preconditions first.
/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
/// returns `(None, Vec::new())`.
- pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
- self.free_holding_cell_htlcs(logger)
+ self.free_holding_cell_htlcs(fee_estimator, logger)
} else { (None, Vec::new()) }
}
/// Frees any pending commitment updates in the holding cell, generating the relevant messages
/// for our counterparty.
- fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
// handling this case better and maybe fulfilling some of the HTLCs while attempting
// to rebalance channels.
match &htlc_update {
- &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
- match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
+ &HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+ skimmed_fee_msat, ..
+ } => {
+ match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
+ onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
+ {
Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
Err(e) => {
match e {
return (None, htlcs_to_fail);
}
let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
- self.send_update_fee(feerate, false, logger)
+ self.send_update_fee(feerate, false, fee_estimator, logger)
} else {
None
};
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
- where L::Target: Logger,
+ pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger,
{
if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
}
- match self.free_holding_cell_htlcs(logger) {
- (Some(_), htlcs_to_fail) => {
- let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
+ match self.free_holding_cell_htlcs(fee_estimator, logger) {
+ (Some(mut additional_update), htlcs_to_fail) => {
// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
// strictly increasing by one, so decrement it here.
self.context.latest_monitor_update_id = monitor_update.update_id;
/// Queues up an outbound update fee by placing it in the holding cell. You should call
/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
/// commitment update.
- pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
- let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
+ pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
assert!(msg_opt.is_none(), "We forced holding cell?");
}
///
/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
/// [`Channel`] if `force_holding_cell` is false.
- fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
+ fn send_update_fee<F: Deref, L: Deref>(
+ &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Option<msgs::UpdateFee>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
if !self.context.is_outbound() {
panic!("Cannot send fee from inbound channel");
}
// Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
return None;
}
- if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
return None;
}
{
assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
- let mut found_blocked = false;
- self.context.pending_monitor_updates.retain(|upd| {
- if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
- if upd.blocked { found_blocked = true; }
- upd.blocked
- });
// If we're past (or at) the FundingSent stage on an outbound channel, try to
// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+ if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
msg.feerate_per_kw, holder_tx_dust_exposure)));
}
- if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
msg.feerate_per_kw, counterparty_tx_dust_exposure)));
}
payment_hash: htlc.payment_hash,
cltv_expiry: htlc.cltv_expiry,
onion_routing_packet: (**onion_packet).clone(),
+ skimmed_fee_msat: htlc.skimmed_fee_msat,
});
}
}
/// this point if we're the funder we should send the initial closing_signed, and in any case
/// shutdown should complete within a reasonable timeframe.
fn closing_negotiation_ready(&self) -> bool {
- self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
- self.context.channel_state &
- (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
- ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
- == BOTH_SIDES_SHUTDOWN_MASK &&
- self.context.pending_update_fee.is_none()
+ self.context.closing_negotiation_ready()
}
/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
pub fn shutdown<SP: Deref>(
&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
- ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
where SP::Target: SignerProvider
{
if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- if self.push_blockable_mon_update(monitor_update) {
- self.context.pending_monitor_updates.last().map(|upd| &upd.update)
- } else { None }
+ self.push_ret_blockable_mon_update(monitor_update)
} else { None };
let shutdown = if send_shutdown {
Some(msgs::Shutdown {
(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
}
- pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
- if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
- self.context.pending_monitor_updates[0].update.update_id - 1
+ /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
+ pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
+ if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
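+ // Everything below the first blocked update has been released, so the latest
+ // released (in-flight) ID is one less than that update's ID.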
+ self.context.blocked_monitor_updates[0].update.update_id - 1
}
/// Returns the next blocked monitor update, if one exists, and a bool indicating whether a
/// further blocked monitor update exists after it.
- pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
- for i in 0..self.context.pending_monitor_updates.len() {
- if self.context.pending_monitor_updates[i].blocked {
- self.context.pending_monitor_updates[i].blocked = false;
- return Some((&self.context.pending_monitor_updates[i].update,
- self.context.pending_monitor_updates.len() > i + 1));
- }
- }
- None
- }
-
- /// Pushes a new monitor update into our monitor update queue, returning whether it should be
- /// immediately given to the user for persisting or if it should be held as blocked.
- fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
- let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
- self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
- update, blocked: !release_monitor
- });
- release_monitor
+ pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
+ if self.context.blocked_monitor_updates.is_empty() { return None; }
+ Some((self.context.blocked_monitor_updates.remove(0).update,
+ !self.context.blocked_monitor_updates.is_empty()))
}
- /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
- /// it should be immediately given to the user for persisting or `None` if it should be held as
- /// blocked.
+ /// Pushes a new monitor update into our monitor update queue, returning it if it should be
+ /// immediately given to the user for persisting or `None` if it should be held as blocked.
fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
- -> Option<&ChannelMonitorUpdate> {
- let release_monitor = self.push_blockable_mon_update(update);
- if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
- }
-
- pub fn no_monitor_updates_pending(&self) -> bool {
- self.context.pending_monitor_updates.is_empty()
- }
-
- pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
- self.context.pending_monitor_updates.retain(|upd| {
- if upd.update.update_id <= update_id {
- assert!(!upd.blocked, "Completed update must have flown");
- false
- } else { true }
- });
- }
-
- pub fn complete_one_mon_update(&mut self, update_id: u64) {
- self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
+ -> Option<ChannelMonitorUpdate> {
+ let release_monitor = self.context.blocked_monitor_updates.is_empty();
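+ // Monitor updates must reach the monitor in order, so once anything is
+ // blocked, every later update has to queue behind it rather than be released.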
+ if !release_monitor {
+ self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+ update,
+ });
+ None
+ } else {
+ Some(update)
+ }
}
- /// Returns an iterator over all unblocked monitor updates which have not yet completed.
- pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
- self.context.pending_monitor_updates.iter()
- .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
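+ /// Returns the number of [`ChannelMonitorUpdate`]s currently held back as blocked.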
+ pub fn blocked_monitor_updates_pending(&self) -> usize {
+ self.context.blocked_monitor_updates.len()
}
/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
/// commitment update.
///
/// `Err`s will only be [`ChannelError::Ignore`].
- pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
- onion_routing_packet: msgs::OnionPacket, logger: &L)
- -> Result<(), ChannelError> where L::Target: Logger {
+ pub fn queue_add_htlc<F: Deref, L: Deref>(
+ &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+ onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
self
- .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
+ .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
+ skimmed_fee_msat, fee_estimator, logger)
.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
.map_err(|err| {
if let ChannelError::Ignore(_) = err { /* fine */ }
/// on this [`Channel`] if `force_holding_cell` is false.
///
/// `Err`s will only be [`ChannelError::Ignore`].
- fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
- onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
- -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
+ fn send_htlc<F: Deref, L: Deref>(
+ &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+ onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
+ skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
}
return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
}
- let available_balances = self.context.get_available_balances();
+ let available_balances = self.context.get_available_balances(fee_estimator);
if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
available_balances.next_outbound_htlc_minimum_msat)));
cltv_expiry,
source,
onion_routing_packet,
+ skimmed_fee_msat,
});
return Ok(None);
}
cltv_expiry,
state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
source,
+ skimmed_fee_msat,
});
let res = msgs::UpdateAddHTLC {
payment_hash,
cltv_expiry,
onion_routing_packet,
+ skimmed_fee_msat,
};
self.context.next_holder_htlc_id += 1;
///
/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
- pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
- let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+ pub fn send_htlc_and_commit<F: Deref, L: Deref>(
+ &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
+ source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+ ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
+ onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
match send_res? {
Some(_) => {
/// [`ChannelMonitorUpdate`] will be returned).
pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
- -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+ -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
where SP::Target: SignerProvider {
for htlc in self.context.pending_outbound_htlcs.iter() {
if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- if self.push_blockable_mon_update(monitor_update) {
- self.context.pending_monitor_updates.last().map(|upd| &upd.update)
- } else { None }
+ self.push_ret_blockable_mon_update(monitor_update)
} else { None };
let shutdown = msgs::Shutdown {
channel_id: self.context.channel_id,
channel_type,
channel_keys_id,
- pending_monitor_updates: Vec::new(),
+ blocked_monitor_updates: Vec::new(),
}
})
}
// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
// set it now. If they don't understand it, we'll fall back to our default of
// `only_static_remotekey`.
- #[cfg(anchors)]
- { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
- if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
- their_features.supports_anchors_zero_fee_htlc_tx() {
- ret.set_anchors_zero_fee_htlc_tx_required();
- }
+ if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+ their_features.supports_anchors_zero_fee_htlc_tx() {
+ ret.set_anchors_zero_fee_htlc_tx_required();
}
ret
channel_type,
channel_keys_id,
- pending_monitor_updates: Vec::new(),
+ blocked_monitor_updates: Vec::new(),
}
};
}
let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
+ let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
- for htlc in self.context.pending_outbound_htlcs.iter() {
+ for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
htlc.htlc_id.write(writer)?;
htlc.amount_msat.write(writer)?;
htlc.cltv_expiry.write(writer)?;
reason.write(writer)?;
}
}
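+ // Skimmed fees are stored positionally: the vector stays empty until the first
+ // `Some` fee, at which point earlier HTLCs are backfilled with `None` so each
+ // entry lines up with its HTLC on read.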
+ if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
+ if pending_outbound_skimmed_fees.is_empty() {
+ for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
+ }
+ pending_outbound_skimmed_fees.push(Some(skimmed_fee));
+ } else if !pending_outbound_skimmed_fees.is_empty() {
+ pending_outbound_skimmed_fees.push(None);
+ }
}
+ let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
- for update in self.context.holding_cell_htlc_updates.iter() {
+ for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
match update {
- &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
+ &HTLCUpdateAwaitingACK::AddHTLC {
+ ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+ skimmed_fee_msat,
+ } => {
0u8.write(writer)?;
amount_msat.write(writer)?;
cltv_expiry.write(writer)?;
payment_hash.write(writer)?;
source.write(writer)?;
onion_routing_packet.write(writer)?;
+
+ if let Some(skimmed_fee) = skimmed_fee_msat {
+ if holding_cell_skimmed_fees.is_empty() {
+ for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
+ }
+ holding_cell_skimmed_fees.push(Some(skimmed_fee));
+ } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
},
&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
1u8.write(writer)?;
(5, self.context.config, required),
(6, serialized_holder_htlc_max_in_flight, option),
(7, self.context.shutdown_scriptpubkey, option),
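+ // Type 8 is even and thus required-to-understand: older readers that don't know
+ // about blocked monitor updates will refuse to read a channel carrying them
+ // rather than silently dropping queued updates.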
+ (8, self.context.blocked_monitor_updates, optional_vec),
(9, self.context.target_closing_feerate_sats_per_kw, option),
- (11, self.context.monitor_pending_finalized_fulfills, vec_type),
+ (11, self.context.monitor_pending_finalized_fulfills, required_vec),
(13, self.context.channel_creation_height, required),
- (15, preimages, vec_type),
+ (15, preimages, required_vec),
(17, self.context.announcement_sigs_state, required),
(19, self.context.latest_inbound_scid_alias, option),
(21, self.context.outbound_scid_alias, required),
(28, holder_max_accepted_htlcs, option),
(29, self.context.temporary_channel_id, option),
(31, channel_pending_event_emitted, option),
- (33, self.context.pending_monitor_updates, vec_type),
+ (35, pending_outbound_skimmed_fees, optional_vec),
+ (37, holding_cell_skimmed_fees, optional_vec),
});
Ok(())
},
_ => return Err(DecodeError::InvalidValue),
},
+ skimmed_fee_msat: None,
});
}
payment_hash: Readable::read(reader)?,
source: Readable::read(reader)?,
onion_routing_packet: Readable::read(reader)?,
+ skimmed_fee_msat: None,
},
1 => HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: Readable::read(reader)?,
let mut temporary_channel_id: Option<[u8; 32]> = None;
let mut holder_max_accepted_htlcs: Option<u16> = None;
- let mut pending_monitor_updates = Some(Vec::new());
+ let mut blocked_monitor_updates = Some(Vec::new());
+
+ let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
+ let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
(6, holder_max_htlc_value_in_flight_msat, option),
(7, shutdown_scriptpubkey, option),
+ (8, blocked_monitor_updates, optional_vec),
(9, target_closing_feerate_sats_per_kw, option),
- (11, monitor_pending_finalized_fulfills, vec_type),
+ (11, monitor_pending_finalized_fulfills, optional_vec),
(13, channel_creation_height, option),
- (15, preimages_opt, vec_type),
+ (15, preimages_opt, optional_vec),
(17, announcement_sigs_state, option),
(19, latest_inbound_scid_alias, option),
(21, outbound_scid_alias, option),
(28, holder_max_accepted_htlcs, option),
(29, temporary_channel_id, option),
(31, channel_pending_event_emitted, option),
- (33, pending_monitor_updates, vec_type),
+ (35, pending_outbound_skimmed_fees_opt, optional_vec),
+ (37, holding_cell_skimmed_fees_opt, optional_vec),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
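+ // Re-apply any skimmed fees we read, matching them positionally against the
+ // pending outbound HTLCs and holding cell adds they were written alongside.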
+ if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
+ let mut iter = skimmed_fees.into_iter();
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+ }
+ // We expect all skimmed fees to be consumed above
+ if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+ }
+ if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
+ let mut iter = skimmed_fees.into_iter();
+ for htlc in holding_cell_htlc_updates.iter_mut() {
+ if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
+ *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+ }
+ }
+ // We expect all skimmed fees to be consumed above
+ if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+ }
+
Ok(Channel {
context: ChannelContext {
user_id,
channel_type: channel_type.unwrap(),
channel_keys_id,
- pending_monitor_updates: pending_monitor_updates.unwrap(),
+ blocked_monitor_updates: blocked_monitor_updates.unwrap(),
}
})
}
use hex;
use crate::ln::PaymentHash;
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
- #[cfg(anchors)]
use crate::ln::channel::InitFeatures;
use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
}
}
- #[cfg(not(feature = "grind_signatures"))]
+ #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
}
session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
first_hop_htlc_msat: 548,
payment_id: PaymentId([42; 32]),
- }
+ },
+ skimmed_fee_msat: None,
});
// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
+ skimmed_fee_msat: None,
};
out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
out
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
+ skimmed_fee_msat: None,
};
out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
out
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
+ skimmed_fee_msat: None,
};
out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
out
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
+ skimmed_fee_msat: None,
};
out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
out
assert!(res.is_ok());
}
- #[cfg(anchors)]
#[test]
fn test_supports_anchors_zero_htlc_tx_fee() {
// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
assert_eq!(channel_b.context.channel_type, expected_channel_type);
}
- #[cfg(anchors)]
#[test]
fn test_rejects_implicit_simple_anchors() {
// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
assert!(channel_b.is_err());
}
- #[cfg(anchors)]
#[test]
fn test_rejects_simple_anchors_channel_type() {
// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,