use crate::ln::{PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
-use crate::ln::msgs::{DecodeError, OptionalField, DataLossProtect};
+use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
pub outbound_capacity_msat: u64,
/// The maximum value we can assign to the next outbound HTLC
pub next_outbound_htlc_limit_msat: u64,
+ /// The minimum value we can assign to the next outbound HTLC
+ pub next_outbound_htlc_minimum_msat: u64,
}
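+ // A minimal usage sketch for the new min/max fields (hypothetical caller; `chan`
+ // and `amount_msat` are assumed to be in scope):
+ //
+ //     let balances = chan.get_available_balances();
+ //     let can_send = amount_msat >= balances.next_outbound_htlc_minimum_msat
+ //         && amount_msat <= balances.next_outbound_htlc_limit_msat;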
#[derive(Debug, Clone, Copy, PartialEq)]
/// We've announced the channel as enabled and are connected to our peer.
Enabled,
/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
- DisabledStaged,
+ DisabledStaged(u8),
/// Our channel is live again, but we haven't announced the channel as enabled yet.
- EnabledStaged,
+ EnabledStaged(u8),
/// We've announced the channel as disabled.
Disabled,
}
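+ // The `u8` added to the staged variants is a tick counter; a sketch of the intended
+ // use (an assumption based on usage - the actual tick thresholds live in the
+ // `ChannelManager` timer handling, not shown here):
+ //
+ //     match status {
+ //         ChannelUpdateStatus::DisabledStaged(ref mut ticks) => {
+ //             *ticks += 1;
+ //             // once enough ticks elapse, broadcast a disabled channel_update
+ //         },
+ //         _ => {},
+ //     }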
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
+struct PendingChannelMonitorUpdate {
+ update: ChannelMonitorUpdate,
+ /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
+ /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
+ /// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
+ ///
+ /// [`ChannelManager`]: super::channelmanager::ChannelManager
+ blocked: bool,
+}
+
+impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
+ (0, update, required),
+ (2, blocked, required),
+});
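+
+ // Illustrative queue invariant (enforced by the retain/debug_assert logic further
+ // down): unblocked updates always precede blocked ones, e.g.
+ //
+ //     [ { update_id: 5, blocked: false }, { update_id: 6, blocked: true } ]
+ //
+ // is valid, while a blocked update followed by an unblocked one is not.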
+
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
user_id: u128,
channel_id: [u8; 32],
+ temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
channel_state: u32,
// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
pub counterparty_max_accepted_htlcs: u16,
#[cfg(not(test))]
counterparty_max_accepted_htlcs: u16,
- //implied by OUR_MAX_HTLCS: max_accepted_htlcs: u16,
+ holder_max_accepted_htlcs: u16,
minimum_depth: Option<u32>,
counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
// blinded paths instead of simple scid+node_id aliases.
outbound_scid_alias: u64,
+ // We track whether we already emitted a `ChannelPending` event.
+ channel_pending_event_emitted: bool,
+
// We track whether we already emitted a `ChannelReady` event.
channel_ready_event_emitted: bool,
/// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
/// completes we still need to be able to complete the persistence. Thus, we have to keep a
/// copy of the [`ChannelMonitorUpdate`] here until it is complete.
- pending_monitor_updates: Vec<ChannelMonitorUpdate>,
+ pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
#[cfg(any(test, fuzzing))]
feerate: u32,
}
-pub const OUR_MAX_HTLCS: u16 = 50; //TODO
+pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- Some(signer_provider.get_shutdown_scriptpubkey())
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
}
}
+ let destination_script = match signer_provider.get_destination_script() {
+ Ok(script) => script,
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+ };
+
+ let temporary_channel_id = entropy_source.get_secure_random_bytes();
+
Ok(Channel {
user_id,
inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
- channel_id: entropy_source.get_secure_random_bytes(),
+ channel_id: temporary_channel_id,
+ temporary_channel_id: Some(temporary_channel_id),
channel_state: ChannelState::OurInitSent as u32,
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
holder_signer,
shutdown_scriptpubkey,
- destination_script: signer_provider.get_destination_script(),
+ destination_script,
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
counterparty_htlc_minimum_msat: 0,
holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
counterparty_max_accepted_htlcs: 0,
+ holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
minimum_depth: None, // Filled in in accept_channel
counterparty_forwarding_info: None,
latest_inbound_scid_alias: None,
outbound_scid_alias,
+ channel_pending_event_emitted: false,
channel_ready_event_emitted: false,
#[cfg(any(test, fuzzing))]
let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
match &msg.shutdown_scriptpubkey {
- &OptionalField::Present(ref script) => {
+ &Some(ref script) => {
// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
if script.len() == 0 {
None
}
},
// Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, so we fail the channel
- &OptionalField::Absent => {
+ &None => {
return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
}
}
} else { None };
let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- Some(signer_provider.get_shutdown_scriptpubkey())
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+ }
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
}
}
+ let destination_script = match signer_provider.get_destination_script() {
+ Ok(script) => script,
+ Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+ };
+
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
inbound_handshake_limits_override: None,
channel_id: msg.temporary_channel_id,
+ temporary_channel_id: Some(msg.temporary_channel_id),
channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
holder_signer,
shutdown_scriptpubkey,
- destination_script: signer_provider.get_destination_script(),
+ destination_script,
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
+ holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
counterparty_forwarding_info: None,
latest_inbound_scid_alias: None,
outbound_scid_alias,
+ channel_pending_event_emitted: false,
channel_ready_event_emitted: false,
#[cfg(any(test, fuzzing))]
}
pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+ let release_cs_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
- UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+ // Even if we aren't supposed to let new monitor updates with commitment state
+ // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+ // matter what. Sadly, to push a new monitor update which flies before others
+ // already queued, we have to insert it into the pending queue and update the
+ // update_ids of all the following monitors.
+ let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want
+ // them to be strictly increasing by one, so decrement it here.
+ self.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ self.pending_monitor_updates.len() - 1
+ } else {
+ let insert_pos = self.pending_monitor_updates.iter().position(|upd| upd.blocked)
+ .unwrap_or(self.pending_monitor_updates.len());
+ let new_mon_id = self.pending_monitor_updates.get(insert_pos)
+ .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+ monitor_update.update_id = new_mon_id;
+ self.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ for held_update in self.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+ held_update.update.update_id += 1;
+ }
+ if msg.is_some() {
+ debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+ let update = self.build_commitment_no_status_check(logger);
+ self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update, blocked: true,
+ });
+ }
+ insert_pos
+ };
+ self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
UpdateFulfillCommitFetch::NewClaim {
- monitor_update: self.pending_monitor_updates.last().unwrap(),
+ monitor_update: &self.pending_monitor_updates.get(unblocked_update_pos)
+ .expect("We just pushed the monitor update").update,
htlc_value_msat,
}
},
- UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
- self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- UpdateFulfillCommitFetch::NewClaim {
- monitor_update: self.pending_monitor_updates.last().unwrap(),
- htlc_value_msat,
- }
- }
UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
}
}
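+ // Worked example of the insert-and-renumber path above (illustrative ids): with a
+ // pending queue of [id 4 (unblocked), id 5 (blocked)], a new preimage update is
+ // inserted at the first blocked position with id 5, and the old blocked update is
+ // bumped to id 6, keeping update_ids strictly increasing: [4, 5 (new), 6 (blocked)].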
let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
match &msg.shutdown_scriptpubkey {
- &OptionalField::Present(ref script) => {
+ &Some(ref script) => {
// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
if script.len() == 0 {
None
}
},
// Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, so we fail the channel
- &OptionalField::Absent => {
+ &None => {
return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
}
}
Ok((msgs::FundingSigned {
channel_id: self.channel_id,
- signature
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
}, channel_monitor))
}
/// corner case properly.
pub fn get_available_balances(&self) -> AvailableBalances {
// Note that we have to handle overflow due to the above case.
+ let inbound_stats = self.get_inbound_pending_htlc_stats(None);
let outbound_stats = self.get_outbound_pending_htlc_stats(None);
let mut balance_msat = self.value_to_self_msat;
}
balance_msat -= outbound_stats.pending_htlcs_value_msat;
- let outbound_capacity_msat = cmp::max(self.value_to_self_msat as i64
- - outbound_stats.pending_htlcs_value_msat as i64
- - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000,
- 0) as u64;
+ let outbound_capacity_msat = self.value_to_self_msat
+ .saturating_sub(outbound_stats.pending_htlcs_value_msat)
+ .saturating_sub(
+ self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
+
+ let mut available_capacity_msat = outbound_capacity_msat;
+
+ if self.is_outbound() {
+ // We should mind channel commit tx fee when computing how much of the available capacity
+ // can be used in the next htlc. Mirrors the logic in send_htlc.
+ //
+ // The fee depends on whether the amount we will be sending is above dust or not,
+ // and the answer will in turn change the amount itself — making it a circular
+ // dependency.
+ // This complicates the computation around dust-values, up to the one-htlc-value.
+ let mut real_dust_limit_timeout_sat = self.holder_dust_limit_satoshis;
+ if !self.opt_anchors() {
+ real_dust_limit_timeout_sat += self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000;
+ }
+
+ let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
+ let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
+ let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
+ let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
+
+ // We will first subtract the fee as if we were above-dust. Then, if the resulting
+ // value ends up being below dust, we have this fee available again. In that case,
+ // match the value to right-below-dust.
+ let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64);
+ if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
+ let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
+ debug_assert!(one_htlc_difference_msat != 0);
+ capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
+ capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
+ available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
+ } else {
+ available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
+ }
+ } else {
+ // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
+ // sending a new HTLC won't reduce their balance below our reserve threshold.
+ let mut real_dust_limit_success_sat = self.counterparty_dust_limit_satoshis;
+ if !self.opt_anchors() {
+ real_dust_limit_success_sat += self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000;
+ }
+
+ let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
+ let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
+
+ let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000;
+ let remote_balance_msat = (self.channel_value_satoshis * 1000 - self.value_to_self_msat)
+ .saturating_sub(inbound_stats.pending_htlcs_value_msat);
+
+ if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
+ // If another HTLC's fee would reduce the remote's balance below the reserve limit
+ // we've selected for them, we can only send dust HTLCs.
+ available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
+ }
+ }
+
+ let mut next_outbound_htlc_minimum_msat = self.counterparty_htlc_minimum_msat;
+
+ // If we get close to our maximum dust exposure, we end up in a situation where we can send
+ // between zero and the remaining dust exposure limit, OR above the dust limit.
+ // Because we cannot express this as a simple min/max, we prefer to tell the user they can
+ // send above the dust limit (as the router can always overpay to meet the dust limit).
+ let mut remaining_msat_below_dust_exposure_limit = None;
+ let mut dust_exposure_dust_limit_msat = 0;
+
+ let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() {
+ (self.counterparty_dust_limit_satoshis, self.holder_dust_limit_satoshis)
+ } else {
+ let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
+ (self.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
+ self.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
+ };
+ let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
+ remaining_msat_below_dust_exposure_limit =
+ Some(self.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+ dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
+ }
+
+ let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
+ remaining_msat_below_dust_exposure_limit = Some(cmp::min(
+ remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
+ self.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+ dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
+ }
+
+ if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
+ if available_capacity_msat < dust_exposure_dust_limit_msat {
+ available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
+ } else {
+ next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
+ }
+ }
+
+ available_capacity_msat = cmp::min(available_capacity_msat,
+ self.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
+
+ if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 {
+ available_capacity_msat = 0;
+ }
+
AvailableBalances {
inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000
- self.value_to_self_msat as i64
- self.holder_selected_channel_reserve_satoshis as i64 * 1000,
0) as u64,
outbound_capacity_msat,
- next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64,
- self.counterparty_max_htlc_value_in_flight_msat as i64
- - outbound_stats.pending_htlcs_value_msat as i64),
- 0) as u64,
+ next_outbound_htlc_limit_msat: available_capacity_msat,
+ next_outbound_htlc_minimum_msat,
balance_msat,
}
}
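+ // Worked example for the non-anchor dust adjustment above (illustrative values):
+ // with holder_dust_limit_satoshis = 354 and feerate_per_kw = 1000,
+ //
+ //     real_dust_limit_timeout_sat = 354 + 1000 * htlc_timeout_tx_weight(false) / 1000
+ //                                 = 354 + 663 = 1017 sats,
+ //
+ // so a next HTLC below 1_017_000 msat is dust on our commitment tx and skips the
+ // above-dust commitment fee reservation.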
feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
}
- // Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
- // number of pending HTLCs that are on track to be in our next commitment tx, plus an additional
- // HTLC if `fee_spike_buffer_htlc` is Some, plus a new HTLC given by `new_htlc_amount`. Dust HTLCs
- // are excluded.
+ /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
+ /// number of pending HTLCs that are on track to be in our next commitment tx.
+ ///
+ /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
+ /// `fee_spike_buffer_htlc` is `Some`.
+ ///
+ /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
+ /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
+ ///
+ /// Dust HTLCs are excluded.
fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
assert!(self.is_outbound());
res
}
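+ // Typical call pattern, mirroring the callers above (`Some(())` requests the extra
+ // fee-spike-buffer HTLC; `amount_msat` is an assumed in-scope value):
+ //
+ //     let htlc = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
+ //     let fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE
+ //         * self.next_local_commit_tx_fee_msat(htlc, Some(()));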
- // Get the commitment tx fee for the remote's next commitment transaction based on the number of
- // pending HTLCs that are on track to be in their next commitment tx, plus an additional HTLC if
- // `fee_spike_buffer_htlc` is Some, plus a new HTLC given by `new_htlc_amount`. Dust HTLCs are
- // excluded.
+ /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
+ /// pending HTLCs that are on track to be in their next commitment tx.
+ ///
+ /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
+ /// `fee_spike_buffer_htlc` is `Some`.
+ ///
+ /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
+ /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
+ ///
+ /// Dust HTLCs are excluded.
fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
assert!(!self.is_outbound());
let inbound_stats = self.get_inbound_pending_htlc_stats(None);
let outbound_stats = self.get_outbound_pending_htlc_stats(None);
- if inbound_stats.pending_htlcs + 1 > OUR_MAX_HTLCS as u32 {
- return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS)));
+ if inbound_stats.pending_htlcs + 1 > self.holder_max_accepted_htlcs as u32 {
+ return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.holder_max_accepted_htlcs)));
}
if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.holder_max_htlc_value_in_flight_msat {
return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.holder_max_htlc_value_in_flight_msat)));
Ok(())
}
- pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<&ChannelMonitorUpdate, ChannelError>
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
where L::Target: Logger
{
if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
}
- // TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the update
+ // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
+ // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
+ // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
+ // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
+ // backwards compatibility, we never use it in production. To provide test coverage, here,
+ // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
+ #[allow(unused_assignments, unused_mut)]
+ let mut separate_nondust_htlc_sources = false;
+ #[cfg(all(feature = "std", any(test, fuzzing)))] {
+ use core::hash::{BuildHasher, Hasher};
+ // Get a random value using the only std API to do so - the DefaultHasher
+ let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
+ separate_nondust_htlc_sources = rand_val % 2 == 0;
+ }
+
+ let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
- for (idx, (htlc, source)) in htlcs_cloned.drain(..).enumerate() {
+ for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
if let Some(_) = htlc.transaction_output_index {
let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(),
if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
}
- htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
+ if !separate_nondust_htlc_sources {
+ htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
+ }
} else {
- htlcs_and_sigs.push((htlc, None, source));
+ htlcs_and_sigs.push((htlc, None, source_opt.take()));
}
+ if separate_nondust_htlc_sources {
+ if let Some(source) = source_opt.take() {
+ nondust_htlc_sources.push(source);
+ }
+ }
+ debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
}
let holder_commitment_tx = HolderCommitmentTransaction::new(
commitment_tx: holder_commitment_tx,
htlc_outputs: htlcs_and_sigs,
claimed_htlcs,
+ nondust_htlc_sources,
}]
};
}
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
log_bytes!(self.channel_id));
- self.pending_monitor_updates.push(monitor_update);
- return Ok(self.pending_monitor_updates.last().unwrap());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
- self.pending_monitor_updates.push(monitor_update);
self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
- return Ok(self.pending_monitor_updates.last().unwrap());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
/// Public version of the below, checking relevant preconditions first.
update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- (Some(self.pending_monitor_updates.last().unwrap()), htlcs_to_fail)
+ (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
} else {
(None, Vec::new())
}
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, &ChannelMonitorUpdate), ChannelError>
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
where L::Target: Logger,
{
if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
self.monitor_pending_failures.append(&mut revoked_htlcs);
self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
- self.pending_monitor_updates.push(monitor_update);
- return Ok((Vec::new(), self.pending_monitor_updates.last().unwrap()));
+ return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
}
match self.free_holding_cell_htlcs(logger) {
(Some(_), htlcs_to_fail) => {
- let mut additional_update = self.pending_monitor_updates.pop().unwrap();
+ let mut additional_update = self.pending_monitor_updates.pop().unwrap().update;
// free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want them
// to be strictly increasing by one, so decrement it here.
self.latest_monitor_update_id = monitor_update.update_id;
monitor_update.updates.append(&mut additional_update.updates);
self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- self.pending_monitor_updates.push(monitor_update);
- Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
},
(None, htlcs_to_fail) => {
if require_commitment {
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- self.pending_monitor_updates.push(monitor_update);
- Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
} else {
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- self.pending_monitor_updates.push(monitor_update);
- Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
}
}
}
{
assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
- self.pending_monitor_updates.clear();
+ let mut found_blocked = false;
+ self.pending_monitor_updates.retain(|upd| {
+ if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
+ if upd.blocked { found_blocked = true; }
+ upd.blocked
+ });
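+ // Worked example: a queue of [id 4 (unblocked), id 5 (blocked)] retains only
+ // [id 5 (blocked)] here; the unblocked entries have now been persisted, while
+ // blocked ones still await their gating event.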
// If we're past (or at) the FundingSent stage on an outbound channel, try to
// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
channel_id: self.channel_id,
per_commitment_secret,
next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
}
}
if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
msg.next_local_commitment_number == 0 {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish".to_owned()));
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
}
if msg.next_remote_commitment_number > 0 {
- match msg.data_loss_protect {
- OptionalField::Present(ref data_loss) => {
- let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
- let given_secret = SecretKey::from_slice(&data_loss.your_last_per_commitment_secret)
- .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
- if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+ let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
+ let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+ if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+ }
+ if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
+ macro_rules! log_and_panic {
+ ($err_msg: expr) => {
+ log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+ panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
}
- if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
- macro_rules! log_and_panic {
- ($err_msg: expr) => {
- log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
- panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
- }
- }
- log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
- This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
- More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
- If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
- ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
- ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
- Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
- See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
- }
- },
- OptionalField::Absent => {}
+ }
+ log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+ This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+ More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+ If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+ ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+ ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+ Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+ See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
}
}
Some(_) => false,
None => {
assert!(send_shutdown);
- let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+ let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+ };
if !shutdown_scriptpubkey.is_compatible(their_features) {
return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
}
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- Some(self.pending_monitor_updates.last().unwrap())
+ if self.push_blockable_mon_update(monitor_update) {
+ self.pending_monitor_updates.last().map(|upd| &upd.update)
+ } else { None }
} else { None };
let shutdown = if send_shutdown {
Some(msgs::Shutdown {
self.channel_id
}
+ /// Returns the `temporary_channel_id` used during channel establishment.
+ ///
+ /// Will return `None` for channels created prior to LDK version 0.0.115.
+ pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
+ self.temporary_channel_id
+ }
+
pub fn minimum_depth(&self) -> Option<u32> {
self.minimum_depth
}
self.prev_config.map(|prev_config| prev_config.0)
}
+ // Checks whether we should emit a `ChannelPending` event.
+ pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
+ self.is_funding_initiated() && !self.channel_pending_event_emitted
+ }
+
+ // Returns whether we already emitted a `ChannelPending` event.
+ pub(crate) fn channel_pending_event_emitted(&self) -> bool {
+ self.channel_pending_event_emitted
+ }
+
+ // Remembers that we already emitted a `ChannelPending` event.
+ pub(crate) fn set_channel_pending_event_emitted(&mut self) {
+ self.channel_pending_event_emitted = true;
+ }
+
// Checks whether we should emit a `ChannelReady` event.
pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
self.is_usable() && !self.channel_ready_event_emitted
(self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
}
- pub fn get_next_monitor_update(&self) -> Option<&ChannelMonitorUpdate> {
- self.pending_monitor_updates.first()
+ pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
+ if self.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); }
+ self.pending_monitor_updates[0].update.update_id - 1
+ }
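+ // E.g. with pending update ids [6, 7] the latest complete id is 5; with an empty
+ // queue it is simply get_latest_monitor_update_id().
+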
+
+ /// Returns the next blocked monitor update, if one exists, and a bool indicating whether
+ /// a further blocked monitor update exists after it.
+ pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
+ for i in 0..self.pending_monitor_updates.len() {
+ if self.pending_monitor_updates[i].blocked {
+ self.pending_monitor_updates[i].blocked = false;
+ return Some((&self.pending_monitor_updates[i].update,
+ self.pending_monitor_updates.len() > i + 1));
+ }
+ }
+ None
+ }
+
+ /// Pushes a new monitor update into our monitor update queue, returning whether it should
+ /// be given to the user immediately for persisting or held as blocked.
+ fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
+ let release_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+ self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update, blocked: !release_monitor
+ });
+ release_monitor
+ }
+
+ /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
+ /// it should be immediately given to the user for persisting or `None` if it should be held as
+ /// blocked.
+ fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
+ -> Option<&ChannelMonitorUpdate> {
+ let release_monitor = self.push_blockable_mon_update(update);
+ if release_monitor { self.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
+ }
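+
+ // A sketch of how callers consume the return value (see the commitment_signed and
+ // revoke_and_ack paths above):
+ //
+ //     if let Some(update) = self.push_ret_blockable_mon_update(monitor_update) {
+ //         // hand `update` to the chain::Watch for persistence now
+ //     } // otherwise it remains queued as blocked until explicitly unblocked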
+
+ pub fn no_monitor_updates_pending(&self) -> bool {
+ self.pending_monitor_updates.is_empty()
+ }
+
+ pub fn complete_one_mon_update(&mut self, update_id: u64) {
+ self.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
}
/// Returns true if funding_created was sent/received.
htlc_minimum_msat: self.holder_htlc_minimum_msat,
feerate_per_kw: self.feerate_per_kw as u32,
to_self_delay: self.get_holder_selected_contest_delay(),
- max_accepted_htlcs: OUR_MAX_HTLCS,
+ max_accepted_htlcs: self.holder_max_accepted_htlcs,
funding_pubkey: keys.funding_pubkey,
revocation_basepoint: keys.revocation_basepoint,
payment_point: keys.payment_point,
htlc_basepoint: keys.htlc_basepoint,
first_per_commitment_point,
channel_flags: if self.config.announced_channel {1} else {0},
- shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
+ shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey {
Some(script) => script.clone().into_inner(),
None => Builder::new().into_script(),
}),
htlc_minimum_msat: self.holder_htlc_minimum_msat,
minimum_depth: self.minimum_depth.unwrap(),
to_self_delay: self.get_holder_selected_contest_delay(),
- max_accepted_htlcs: OUR_MAX_HTLCS,
+ max_accepted_htlcs: self.holder_max_accepted_htlcs,
funding_pubkey: keys.funding_pubkey,
revocation_basepoint: keys.revocation_basepoint,
payment_point: keys.payment_point,
delayed_payment_basepoint: keys.delayed_payment_basepoint,
htlc_basepoint: keys.htlc_basepoint,
first_per_commitment_point,
- shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
+ shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey {
Some(script) => script.clone().into_inner(),
None => Builder::new().into_script(),
}),
channel_type: Some(self.channel_type.clone()),
+ #[cfg(taproot)]
+ next_local_nonce: None,
}
}
temporary_channel_id,
funding_txid: funding_txo.txid,
funding_output_index: funding_txo.index,
- signature
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ #[cfg(taproot)]
+ next_local_nonce: None,
})
}
// valid, and valid in fuzzing mode's arbitrary validity criteria:
let mut pk = [2; 33]; pk[1] = 0xff;
let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
- let data_loss_protect = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+ let remote_last_secret = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap();
log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id()));
- OptionalField::Present(DataLossProtect {
- your_last_per_commitment_secret: remote_last_secret,
- my_current_per_commitment_point: dummy_pubkey
- })
+ remote_last_secret
} else {
log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
- OptionalField::Present(DataLossProtect {
- your_last_per_commitment_secret: [0;32],
- my_current_per_commitment_point: dummy_pubkey,
- })
+ [0;32]
};
msgs::ChannelReestablish {
channel_id: self.channel_id(),
// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
// overflow here.
next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number - 1,
- data_loss_protect,
+ your_last_per_commitment_secret: remote_last_secret,
+ my_current_per_commitment_point: dummy_pubkey,
+ // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
+ // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
+ // txid of that interactive transaction, else we MUST NOT set it.
+ next_funding_txid: None,
}
}
return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
}
- if amount_msat < self.counterparty_htlc_minimum_msat {
- return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.counterparty_htlc_minimum_msat)));
+ let available_balances = self.get_available_balances();
+ if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
+ available_balances.next_outbound_htlc_minimum_msat)));
+ }
+
+ if amount_msat > available_balances.next_outbound_htlc_limit_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
+ available_balances.next_outbound_htlc_limit_msat)));
}
if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
}
- let inbound_stats = self.get_inbound_pending_htlc_stats(None);
- let outbound_stats = self.get_outbound_pending_htlc_stats(None);
- if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 {
- return Err(ChannelError::Ignore(format!("Cannot push more than their max accepted HTLCs ({})", self.counterparty_max_accepted_htlcs)));
- }
- // Check their_max_htlc_value_in_flight_msat
- if outbound_stats.pending_htlcs_value_msat + amount_msat > self.counterparty_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat)));
- }
+ let need_holding_cell = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+ log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
+ if force_holding_cell { "into holding cell" }
+ else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
+ else { "to peer" });
- let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number);
- let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger);
- if !self.is_outbound() {
- // Check that we won't violate the remote channel reserve by adding this HTLC.
- let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
- let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None);
- let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000;
- if commitment_stats.remote_balance_msat < counterparty_commit_tx_fee_msat + holder_selected_chan_reserve_msat {
- return Err(ChannelError::Ignore("Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_owned()));
- }
- }
-
- let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() {
- (0, 0)
- } else {
- let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
- (dust_buffer_feerate * htlc_success_tx_weight(false) / 1000,
- dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
- };
- let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.counterparty_dust_limit_satoshis;
- if amount_msat / 1000 < exposure_dust_limit_success_sats {
- let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + amount_msat;
- if on_counterparty_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
- return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- on_counterparty_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat())));
- }
- }
-
- let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
- if amount_msat / 1000 < exposure_dust_limit_timeout_sats {
- let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + amount_msat;
- if on_holder_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
- return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
- on_holder_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat())));
- }
- }
-
- let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
- if holder_balance_msat < amount_msat {
- return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self {}", amount_msat, holder_balance_msat)));
- }
-
- // `2 *` and extra HTLC are for the fee spike buffer.
- let commit_tx_fee_msat = if self.is_outbound() {
- let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
- FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(()))
- } else { 0 };
- if holder_balance_msat - amount_msat < commit_tx_fee_msat {
- return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. local_commit_tx_fee {}", holder_balance_msat, commit_tx_fee_msat)));
- }
-
- // Check self.counterparty_selected_channel_reserve_satoshis (the amount we must keep as
- // reserve for the remote to have something to claim if we misbehave)
- let chan_reserve_msat = self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000;
- if holder_balance_msat - amount_msat - commit_tx_fee_msat < chan_reserve_msat {
- return Err(ChannelError::Ignore(format!("Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", chan_reserve_msat)));
- }
-
- if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ if need_holding_cell {
force_holding_cell = true;
}
channel_id: self.channel_id,
signature,
htlc_signatures,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
}
Some(_) => {
let monitor_update = self.build_commitment_no_status_check(logger);
self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- Ok(Some(self.pending_monitor_updates.last().unwrap()))
+ Ok(self.push_ret_blockable_mon_update(monitor_update))
},
None => Ok(None)
}
/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
/// [`ChannelMonitorUpdate`] will be returned.
pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
- target_feerate_sats_per_kw: Option<u32>)
+ target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
where SP::Target: SignerProvider {
for htlc in self.pending_outbound_htlcs.iter() {
return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
}
}
+ if self.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+ return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+ }
assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
let update_shutdown_script = match self.shutdown_scriptpubkey {
Some(_) => false,
None if !chan_closed => {
- let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+ // use override shutdown script if provided
+ let shutdown_scriptpubkey = match override_shutdown_script {
+ Some(script) => script,
+ None => {
+ // otherwise, use the shutdown scriptpubkey provided by the signer
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
+ },
+ };
if !shutdown_scriptpubkey.is_compatible(their_features) {
return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
}
}],
};
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- self.pending_monitor_updates.push(monitor_update);
- Some(self.pending_monitor_updates.last().unwrap())
+ if self.push_blockable_mon_update(monitor_update) {
+ self.pending_monitor_updates.last().map(|upd| &upd.update)
+ } else { None }
} else { None };
let shutdown = msgs::Shutdown {
channel_id: self.channel_id,
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
- self.latest_monitor_update_id += 1;
+ self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
Some((funding_txo, ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
match self {
ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
- ChannelUpdateStatus::DisabledStaged => 0u8.write(writer)?,
- ChannelUpdateStatus::EnabledStaged => 1u8.write(writer)?,
+ ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
+ ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
}
Ok(())
if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config)
{ Some(self.holder_max_htlc_value_in_flight_msat) } else { None };
+ let channel_pending_event_emitted = Some(self.channel_pending_event_emitted);
let channel_ready_event_emitted = Some(self.channel_ready_event_emitted);
// `user_id` used to be a single u64 value. In order to remain backwards compatible with
// we write the high bytes as an option here.
let user_id_high_opt = Some((self.user_id >> 64) as u64);
+ let holder_max_accepted_htlcs = if self.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.holder_max_accepted_htlcs) };
+
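+ // Note: in the TLV convention used below, odd type numbers may be safely ignored by
+ // older readers, while even ones (e.g. 28) are required and cause older versions to
+ // fail deserialization when present.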
write_tlv_fields!(writer, {
(0, self.announcement_sigs, option),
// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
(23, channel_ready_event_emitted, option),
(25, user_id_high_opt, option),
(27, self.channel_keys_id, required),
+ (28, holder_max_accepted_htlcs, option),
+ (29, self.temporary_channel_id, option),
+ (31, channel_pending_event_emitted, option),
+ (33, self.pending_monitor_updates, vec_type),
});
Ok(())
let value_to_self_msat = Readable::read(reader)?;
let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
- let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
+
+ let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
for _ in 0..pending_inbound_htlc_count {
pending_inbound_htlcs.push(InboundHTLCOutput {
htlc_id: Readable::read(reader)?,
}
let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
- let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
+ let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
for _ in 0..pending_outbound_htlc_count {
pending_outbound_htlcs.push(OutboundHTLCOutput {
htlc_id: Readable::read(reader)?,
}
let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
- let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, OUR_MAX_HTLCS as usize*2));
+ let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
for _ in 0..holding_cell_htlc_update_count {
holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
0 => HTLCUpdateAwaitingACK::AddHTLC {
let monitor_pending_commitment_signed = Readable::read(reader)?;
let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
- let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize));
+ let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
for _ in 0..monitor_pending_forwards_count {
monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
}
let monitor_pending_failures_count: u64 = Readable::read(reader)?;
- let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, OUR_MAX_HTLCS as usize));
+ let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
for _ in 0..monitor_pending_failures_count {
monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
}
let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
let mut latest_inbound_scid_alias = None;
let mut outbound_scid_alias = None;
+ let mut channel_pending_event_emitted = None;
let mut channel_ready_event_emitted = None;
let mut user_id_high_opt: Option<u64> = None;
let mut channel_keys_id: Option<[u8; 32]> = None;
+ let mut temporary_channel_id: Option<[u8; 32]> = None;
+ let mut holder_max_accepted_htlcs: Option<u16> = None;
+
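+ // Channels serialized before this field existed simply have no queued updates.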
+ let mut pending_monitor_updates = Some(Vec::new());
read_tlv_fields!(reader, {
(0, announcement_sigs, option),
(23, channel_ready_event_emitted, option),
(25, user_id_high_opt, option),
(27, channel_keys_id, option),
+ (28, holder_max_accepted_htlcs, option),
+ (29, temporary_channel_id, option),
+ (31, channel_pending_event_emitted, option),
+ (33, pending_monitor_updates, vec_type),
});
let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
// separate u64 values.
let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
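+ // An absent TLV here means the writer predates configurable HTLC limits and always used the
+ // default maximum.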
+ let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
+
Ok(Channel {
user_id,
inbound_handshake_limits_override: None,
channel_id,
+ temporary_channel_id,
channel_state,
announcement_sigs_state: announcement_sigs_state.unwrap(),
secp_ctx,
cur_counterparty_commitment_transaction_number,
value_to_self_msat,
+ holder_max_accepted_htlcs,
pending_inbound_htlcs,
pending_outbound_htlcs,
holding_cell_htlc_updates,
// Later in the ChannelManager deserialization phase we scan for channels and assign an scid alias if it's missing
outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
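+ // For channels written before these flags existed, assume the events were already emitted so
+ // we don't re-emit `ChannelPending`/`ChannelReady` after an upgrade.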
+ channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
#[cfg(any(test, fuzzing))]
channel_type: channel_type.unwrap(),
channel_keys_id,
- pending_monitor_updates: Vec::new(),
+ pending_monitor_updates: pending_monitor_updates.unwrap(),
})
}
}
use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
use crate::ln::features::ChannelTypeFeatures;
- use crate::ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
+ use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
use crate::ln::script::ShutdownScript;
use crate::ln::chan_utils;
use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
- use crate::chain::keysinterface::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
+ use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
use crate::chain::transaction::OutPoint;
+ use crate::routing::router::Path;
use crate::util::config::UserConfig;
use crate::util::enforcing_trait_impls::EnforcingSigner;
use crate::util::errors::APIError;
fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
- fn get_destination_script(&self) -> Script {
+ fn get_destination_script(&self) -> Result<Script, ()> {
let secp_ctx = Secp256k1::signing_only();
let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
- Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script()
+ Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
}
- fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+ fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
let secp_ctx = Secp256k1::signing_only();
let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
- ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key))
+ Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
}
}
cltv_expiry: 200000000,
state: OutboundHTLCState::Committed,
source: HTLCSource::OutboundRoute {
- path: Vec::new(),
+ path: Path { hops: Vec::new(), blinded_tail: None },
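+ // `Path` now wraps the hop list together with an optional `blinded_tail`; an empty path
+ // suffices for this test's dummy HTLC source.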
session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
first_hop_htlc_msat: 548,
payment_id: PaymentId([42; 32]),
- payment_secret: None,
}
});
let msg = node_b_chan.get_channel_reestablish(&&logger);
assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
- match msg.data_loss_protect {
- OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => {
- assert_eq!(your_last_per_commitment_secret, [0; 32]);
- },
- _ => panic!()
- }
+ assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
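+ // (`your_last_per_commitment_secret` is now a plain field on `channel_reestablish`; the old
+ // `OptionalField`/`DataLossProtect` wrapper was removed as the fields are always present.)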
// Check that the commitment point in Node A's channel_reestablish message
// is sane.
let msg = node_a_chan.get_channel_reestablish(&&logger);
assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
- match msg.data_loss_protect {
- OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => {
- assert_eq!(your_last_per_commitment_secret, [0; 32]);
- },
- _ => panic!()
- }
+ assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
}
#[test]
}
}
- #[cfg(not(feature = "grind_signatures"))]
+ #[cfg(feature = "_test_vectors")]
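+ // The hard-coded signatures below are the BOLT 3 test vectors; this presumably only
+ // reproduces them with the signing behavior selected by the `_test_vectors` feature.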
#[test]
fn outbound_commitment_test() {
use bitcoin::util::sighash;
use bitcoin::hashes::hex::FromHex;
use bitcoin::hash_types::Txid;
use bitcoin::secp256k1::Message;
- use crate::chain::keysinterface::EcdsaChannelSigner;
+ use crate::sign::EcdsaChannelSigner;
use crate::ln::PaymentPreimage;
use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
10_000_000,
[0; 32],
+ [0; 32],
);
assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],