use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use ln::chan_utils;
use chain::BestBlock;
-use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
+use chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
use chain::transaction::{OutPoint, TransactionData};
use chain::keysinterface::{Sign, KeysInterface};
use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
use util::logger::Logger;
use util::errors::APIError;
-use util::config::{UserConfig, ChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
+use util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
use util::scid_utils::scid_from_parts;
use io;
/// signatures in a commitment_signed message.
/// Implies AwaitingRemoteRevoke.
///
- /// [BOLT #2]: https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md
+ /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
/// We have also included this HTLC in our latest commitment_signed and are now just waiting
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with OurInitSent -> TheirInitSent -> FundingCreated -> FundingSent.
-/// TheirFundingLocked and OurFundingLocked then get set on FundingSent, and when both are set we
+/// TheirChannelReady and OurChannelReady then get set on FundingSent, and when both are set we
/// move on to ChannelFunded.
/// Note that PeerDisconnected can be set on both ChannelFunded and FundingSent.
/// ChannelFunded can then get all remaining flags set on it, until we finish shutdown, then we
/// upon receipt of funding_created, so simply skip this state.
FundingCreated = 4,
/// Set when we have received/sent funding_created and funding_signed and are thus now waiting
- /// on the funding transaction to confirm. The FundingLocked flags are set to indicate when we
+ /// on the funding transaction to confirm. The ChannelReady flags are set to indicate when we
/// and our counterparty consider the funding transaction confirmed.
FundingSent = 8,
- /// Flag which can be set on FundingSent to indicate they sent us a funding_locked message.
- /// Once both TheirFundingLocked and OurFundingLocked are set, state moves on to ChannelFunded.
- TheirFundingLocked = 1 << 4,
- /// Flag which can be set on FundingSent to indicate we sent them a funding_locked message.
- /// Once both TheirFundingLocked and OurFundingLocked are set, state moves on to ChannelFunded.
- OurFundingLocked = 1 << 5,
+ /// Flag which can be set on FundingSent to indicate they sent us a channel_ready message.
+ /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded.
+ TheirChannelReady = 1 << 4,
+ /// Flag which can be set on FundingSent to indicate we sent them a channel_ready message.
+ /// Once both TheirChannelReady and OurChannelReady are set, state moves on to ChannelFunded.
+ OurChannelReady = 1 << 5,
ChannelFunded = 64,
/// Flag which is set on ChannelFunded and FundingSent indicating remote side is considered
/// "disconnected" and no updates are allowed until after we've done a channel_reestablish
pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
pub finalized_claimed_htlcs: Vec<HTLCSource>,
pub funding_broadcastable: Option<Transaction>,
- pub funding_locked: Option<msgs::FundingLocked>,
+ pub channel_ready: Option<msgs::ChannelReady>,
pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
- pub funding_locked: Option<msgs::FundingLocked>,
+ pub channel_ready: Option<msgs::ChannelReady>,
pub raa: Option<msgs::RevokeAndACK>,
pub commitment_update: Option<msgs::CommitmentUpdate>,
pub order: RAACommitmentOrder,
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
+/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
+/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
+/// ChannelUpdate prompted by the config update. This value was determined as follows:
+///
+/// * The expected interval between ticks (1 minute).
+/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
+/// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
+/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval (i.e. 300s / 60s = 5 ticks)
+pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
+
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by another channel participant entity.
pub(super) struct Channel<Signer: Sign> {
- #[cfg(any(test, feature = "_test_utils"))]
- pub(crate) config: ChannelConfig,
- #[cfg(not(any(test, feature = "_test_utils")))]
- config: ChannelConfig,
+ config: LegacyChannelConfig,
+
+ // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
+ // constructed using it. The second element in the tuple corresponds to the number of ticks that
+ // have elapsed since the update occurred.
+ prev_config: Option<(ChannelConfig, usize)>,
inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
/// send it first.
resend_order: RAACommitmentOrder,
- monitor_pending_funding_locked: bool,
+ monitor_pending_channel_ready: bool,
monitor_pending_revoke_and_ack: bool,
monitor_pending_commitment_signed: bool,
monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
- /// funding_locked *before* sending the channel_reestablish (which is clearly a violation of
- /// the BOLT specs). We copy c-lightning's workaround here and simply store the funding_locked
+ /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
+ /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
/// message until we receive a channel_reestablish.
///
/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
- pub workaround_lnd_bug_4006: Option<msgs::FundingLocked>,
+ pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
#[cfg(any(test, fuzzing))]
// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
// Our counterparty can offer us SCID aliases which they will map to this channel when routing
// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
// the channel's funding UTXO.
+ //
+ // We also use this when sending our peer a channel_update that isn't to be broadcasted
+ // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
+ // associated channel mapping.
+ //
// We only bother storing the most recent SCID alias at any time, though our counterparty has
// to store all of them.
latest_inbound_scid_alias: Option<u64>,
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
-/// See https://github.com/lightningnetwork/lightning-rfc/issues/905 for more details.
+/// See https://github.com/lightning/bolts/issues/905 for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
Ignore(String),
Warn(String),
Close(String),
- CloseDelayBroadcast(String),
}
impl fmt::Debug for ChannelError {
&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
- &ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e)
}
}
}
// available. If it's private, we first try `scid_privacy` as it provides better privacy
// with no other changes, and fall back to `only_static_remotekey`
let mut ret = ChannelTypeFeatures::only_static_remote_key();
- if !config.channel_options.announced_channel && config.own_channel_config.negotiate_scid_privacy {
+ if !config.channel_handshake_config.announced_channel && config.channel_handshake_config.negotiate_scid_privacy {
ret.set_scid_privacy_required();
}
ret
// Constructors:
pub fn new_outbound<K: Deref, F: Deref>(
- fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig, current_chain_height: u32,
outbound_scid_alias: u64
) -> Result<Channel<Signer>, APIError>
{
let opt_anchors = false; // TODO - should be based on features
- let holder_selected_contest_delay = config.own_channel_config.our_to_self_delay;
+ let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
let holder_signer = keys_provider.get_channel_signer(false, channel_value_satoshis);
let pubkeys = holder_signer.pubkeys().clone();
return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
}
- let feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors);
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());
- let shutdown_scriptpubkey = if config.channel_options.commit_upfront_shutdown_pubkey {
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
Some(keys_provider.get_shutdown_scriptpubkey())
} else { None };
Ok(Channel {
user_id,
- config: config.channel_options.clone(),
- inbound_handshake_limits_override: Some(config.peer_channel_config_limits.clone()),
+
+ config: LegacyChannelConfig {
+ options: config.channel_config.clone(),
+ announced_channel: config.channel_handshake_config.announced_channel,
+ commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+ },
+
+ prev_config: None,
+
+ inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
channel_id: keys_provider.get_secure_random_bytes(),
channel_state: ChannelState::OurInitSent as u32,
resend_order: RAACommitmentOrder::CommitmentFirst,
- monitor_pending_funding_locked: false,
+ monitor_pending_channel_ready: false,
monitor_pending_revoke_and_ack: false,
monitor_pending_commitment_signed: false,
monitor_pending_forwards: Vec::new(),
counterparty_dust_limit_satoshis: 0,
holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
counterparty_max_htlc_value_in_flight_msat: 0,
- holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.own_channel_config),
+ holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
holder_selected_channel_reserve_satoshis,
counterparty_htlc_minimum_msat: 0,
- holder_htlc_minimum_msat: if config.own_channel_config.our_htlc_minimum_msat == 0 { 1 } else { config.own_channel_config.our_htlc_minimum_msat },
+ holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
counterparty_max_accepted_htlcs: 0,
minimum_depth: None, // Filled in in accept_channel
channel_transaction_parameters: ChannelTransactionParameters {
holder_pubkeys: pubkeys,
- holder_selected_contest_delay: config.own_channel_config.our_to_self_delay,
+ holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
is_outbound_from_holder: true,
counterparty_parameters: None,
funding_outpoint: None,
})
}
- fn check_remote_fee<F: Deref>(fee_estimator: &F, feerate_per_kw: u32) -> Result<(), ChannelError>
+ fn check_remote_fee<F: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>, feerate_per_kw: u32) -> Result<(), ChannelError>
where F::Target: FeeEstimator
{
// We only bound the fee updates on the upper side to prevent completely absurd feerates,
// We generally don't care too much if they set the feerate to something very high, but it
// could result in the channel being useless due to everything being dust.
let upper_limit = cmp::max(250 * 25,
- fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
+ fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
if feerate_per_kw as u64 > upper_limit {
return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
}
- let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
+ let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
// Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
// occasional issues with feerate disagreements between an initiator that wants a feerate
// of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
/// Creates a new channel from a remote sides' request for one.
/// Assumes chain_hash has already been checked and corresponds with what we expect!
pub fn new_from_req<K: Deref, F: Deref, L: Deref>(
- fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
+ fee_estimator: &LowerBoundedFeeEstimator<F>, keys_provider: &K, counterparty_node_id: PublicKey, their_features: &InitFeatures,
msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig, current_chain_height: u32, logger: &L,
outbound_scid_alias: u64
) -> Result<Channel<Signer>, ChannelError>
if channel_type.supports_any_optional_bits() {
return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
}
- // We currently only allow two channel types, so write it all out here - we allow
- // `only_static_remote_key` in all contexts, and further allow
- // `static_remote_key|scid_privacy` if the channel is not publicly announced.
- let mut allowed_type = ChannelTypeFeatures::only_static_remote_key();
- if *channel_type != allowed_type {
- allowed_type.set_scid_privacy_required();
- if *channel_type != allowed_type {
+
+ if channel_type.requires_unknown_bits() {
+ return Err(ChannelError::Close("Channel Type field contains unknown bits".to_owned()));
+ }
+
+ // We currently only allow four channel types, so write it all out here - we allow
+ // `only_static_remote_key` or `static_remote_key | zero_conf` in all contexts, and
+ // further allow `static_remote_key | scid_privacy` or
+ // `static_remote_key | scid_privacy | zero_conf`, if the channel is not
+ // publicly announced.
+ if *channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ if !channel_type.requires_scid_privacy() && !channel_type.requires_zero_conf() {
return Err(ChannelError::Close("Channel Type was not understood".to_owned()));
}
- if announced_channel {
+
+ if channel_type.requires_scid_privacy() && announced_channel {
return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
}
}
delayed_payment_basepoint: msg.delayed_payment_basepoint,
htlc_basepoint: msg.htlc_basepoint
};
- let mut local_config = (*config).channel_options.clone();
- if config.own_channel_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
- return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.own_channel_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+ if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
+ return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
}
// Check sanity of message fields:
- if msg.funding_satoshis > config.peer_channel_config_limits.max_funding_satoshis {
- return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.peer_channel_config_limits.max_funding_satoshis, msg.funding_satoshis)));
+ if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
+ return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
}
if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
}
Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
- let max_counterparty_selected_contest_delay = u16::min(config.peer_channel_config_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
if msg.to_self_delay > max_counterparty_selected_contest_delay {
return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
}
}
// Now check against optional parameters as set by config...
- if msg.funding_satoshis < config.peer_channel_config_limits.min_funding_satoshis {
- return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.peer_channel_config_limits.min_funding_satoshis)));
+ if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
+ return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
}
- if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.peer_channel_config_limits.max_htlc_minimum_msat)));
+ if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
}
- if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat)));
+ if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
}
- if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.peer_channel_config_limits.max_channel_reserve_satoshis)));
+ if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
}
- if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.peer_channel_config_limits.min_max_accepted_htlcs)));
+ if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
}
if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
// Convert things into internal flags and prep our state:
- if config.peer_channel_config_limits.force_announced_channel_preference {
- if local_config.announced_channel != announced_channel {
+ if config.channel_handshake_limits.force_announced_channel_preference {
+ if config.channel_handshake_config.announced_channel != announced_channel {
return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
}
}
- // we either accept their preference or the preferences match
- local_config.announced_channel = announced_channel;
let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis);
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
}
} else { None };
- let shutdown_scriptpubkey = if config.channel_options.commit_upfront_shutdown_pubkey {
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
Some(keys_provider.get_shutdown_scriptpubkey())
} else { None };
let chan = Channel {
user_id,
- config: local_config,
+
+ config: LegacyChannelConfig {
+ options: config.channel_config.clone(),
+ announced_channel,
+ commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+ },
+
+ prev_config: None,
+
inbound_handshake_limits_override: None,
channel_id: msg.temporary_channel_id,
resend_order: RAACommitmentOrder::CommitmentFirst,
- monitor_pending_funding_locked: false,
+ monitor_pending_channel_ready: false,
monitor_pending_revoke_and_ack: false,
monitor_pending_commitment_signed: false,
monitor_pending_forwards: Vec::new(),
counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
- holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.own_channel_config),
+ holder_max_htlc_value_in_flight_msat: Self::get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
holder_selected_channel_reserve_satoshis,
counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
- holder_htlc_minimum_msat: if config.own_channel_config.our_htlc_minimum_msat == 0 { 1 } else { config.own_channel_config.our_htlc_minimum_msat },
+ holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
- minimum_depth: Some(config.own_channel_config.minimum_depth),
+ minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
counterparty_forwarding_info: None,
channel_transaction_parameters: ChannelTransactionParameters {
holder_pubkeys: pubkeys,
- holder_selected_contest_delay: config.own_channel_config.our_to_self_delay,
+ holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
is_outbound_from_holder: false,
counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
selected_contest_delay: msg.to_self_delay,
make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
}
+ /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
+ /// entirely.
+ ///
+ /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
+ /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
+ ///
+ /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
+ /// disconnected).
+ pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
+ (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
+ where L::Target: Logger {
+ // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
+ // (see equivalent if condition there).
+ assert!(self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32) != 0);
+ let mon_update_id = self.latest_monitor_update_id; // Forget the ChannelMonitor update
+ let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+ // Restore the pre-call counter, "dropping" the monitor update that the fulfill generated;
+ // the caller has already applied the preimage to the ChannelMonitor out-of-band, so the
+ // next real update can safely re-use this id.
+ self.latest_monitor_update_id = mon_update_id;
+ if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
+ assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
+ }
+ }
+
fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
// Either ChannelFunded got set (which means it won't be unset) or there is no way any
// caller thought we could have something claimed (cause we wouldn't have accepted in an
};
if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
+ // Note that this condition is the same as the assertion in
+ // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
+ // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
+ // do not get into this branch.
for pending_update in self.holding_cell_htlc_updates.iter() {
match pending_update {
&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
if msg.minimum_depth > peer_limits.max_minimum_depth {
return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
}
- if msg.minimum_depth == 0 {
- // Note that if this changes we should update the serialization minimum version to
- // indicate to older clients that they don't understand some features of the current
- // channel.
- return Err(ChannelError::Close("Minimum confirmation depth must be at least 1".to_owned()));
- }
if let Some(ty) = &msg.channel_type {
if *ty != self.channel_type {
self.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
self.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
self.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
- self.minimum_depth = Some(msg.minimum_depth);
+
+ if peer_limits.trust_own_funding_0conf {
+ self.minimum_depth = Some(msg.minimum_depth);
+ } else {
+ self.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+ }
let counterparty_pubkeys = ChannelPublicKeys {
funding_pubkey: msg.funding_pubkey,
&self.get_counterparty_pubkeys().funding_pubkey
}
- pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError> where L::Target: Logger {
+ pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
if self.is_outbound() {
return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
}
&self.channel_transaction_parameters,
funding_redeemscript.clone(), self.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx, best_block);
+ holder_commitment_tx, best_block, self.counterparty_node_id);
channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
Ok((msgs::FundingSigned {
channel_id: self.channel_id,
signature
- }, channel_monitor))
+ }, channel_monitor, self.check_get_channel_ready(0)))
}
/// Handles a funding_signed message from the remote end.
/// If this call is successful, broadcast the funding transaction (and not before!)
- pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction), ChannelError> where L::Target: Logger {
+ pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
if !self.is_outbound() {
return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
}
&self.channel_transaction_parameters,
funding_redeemscript.clone(), self.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx, best_block);
+ holder_commitment_tx, best_block, self.counterparty_node_id);
channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
- Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap()))
+ Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
}
- /// Handles a funding_locked message from our peer. If we've already sent our funding_locked
+ /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
/// and the channel is now usable (and public), this may generate an announcement_signatures to
/// reply with.
- pub fn funding_locked<L: Deref>(&mut self, msg: &msgs::FundingLocked, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock, logger: &L) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError> where L::Target: Logger {
+ pub fn channel_ready<L: Deref>(&mut self, msg: &msgs::ChannelReady, node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock, logger: &L) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError> where L::Target: Logger {
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
self.workaround_lnd_bug_4006 = Some(msg.clone());
- return Err(ChannelError::Ignore("Peer sent funding_locked when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
+ return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
}
if let Some(scid_alias) = msg.short_channel_id_alias {
let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.channel_state |= ChannelState::TheirFundingLocked as u32;
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
+ self.channel_state |= ChannelState::TheirChannelReady as u32;
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
self.update_time_counter += 1;
} else if self.channel_state & (ChannelState::ChannelFunded as u32) != 0 ||
- // If we reconnected before sending our funding locked they may still resend theirs:
- (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) ==
- (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32))
+ // If we reconnected before sending our `channel_ready` they may still resend theirs:
+ (self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
+ (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
{
- // They probably disconnected/reconnected and re-sent the funding_locked, which is
+ // They probably disconnected/reconnected and re-sent the channel_ready, which is
// required, or they're sending a fresh SCID alias.
let expected_point =
if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
// the current one.
self.counterparty_cur_commitment_point
} else {
- // If they have sent updated points, funding_locked is always supposed to match
+ // If they have sent updated points, channel_ready is always supposed to match
// their "first" point, which we re-derive here.
Some(PublicKey::from_secret_key(&self.secp_ctx, &SecretKey::from_slice(
&self.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
).expect("We already advanced, so previous secret keys should have been validated already")))
};
if expected_point != Some(msg.next_per_commitment_point) {
- return Err(ChannelError::Close("Peer sent a reconnect funding_locked with a different point".to_owned()));
+ return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
}
return Ok(None);
} else {
- return Err(ChannelError::Close("Peer sent a funding_locked at a strange time".to_owned()));
+ return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
}
self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point;
self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
- log_info!(logger, "Received funding_locked from peer for channel {}", log_bytes!(self.channel_id()));
+ log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));
Ok(self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger))
}
/// monitor update failure must *not* have been sent to the remote end, and must instead
/// have been dropped. They will be regenerated when monitor_updating_restored is called.
pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool,
- mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
+ resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
) {
self.monitor_pending_revoke_and_ack |= resend_raa;
self.monitor_pending_commitment_signed |= resend_commitment;
+ self.monitor_pending_channel_ready |= resend_channel_ready;
self.monitor_pending_forwards.append(&mut pending_forwards);
self.monitor_pending_failures.append(&mut pending_fails);
self.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
- let funding_broadcastable = if self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.is_outbound() {
- self.funding_transaction.take()
- } else { None };
+ // If we're past (or at) the FundingSent stage on an outbound channel, try to
+ // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
+ // first received the funding_signed.
+ let mut funding_broadcastable =
+ if self.is_outbound() && self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+ self.funding_transaction.take()
+ } else { None };
+ // That said, if the funding transaction is already confirmed (ie we're active with a
+ // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
+ if self.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelFunded as u32 && self.minimum_depth != Some(0) {
+ funding_broadcastable = None;
+ }
// We will never broadcast the funding transaction when we're in MonitorUpdateFailed (and
// we assume the user never directly broadcasts the funding transaction and waits for us to
- // do it). Thus, we can only ever hit monitor_pending_funding_locked when we're
+ // do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
// * an inbound channel that failed to persist the monitor on funding_created and we got
// the funding transaction confirmed before the monitor was persisted, or
- // * a 0-conf channel and intended to send the funding_locked before any broadcast at all.
- let funding_locked = if self.monitor_pending_funding_locked {
- assert!(!self.is_outbound(), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
- self.monitor_pending_funding_locked = false;
+ // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
+ let channel_ready = if self.monitor_pending_channel_ready {
+ assert!(!self.is_outbound() || self.minimum_depth == Some(0),
+ "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
+ self.monitor_pending_channel_ready = false;
let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- Some(msgs::FundingLocked {
+ Some(msgs::ChannelReady {
channel_id: self.channel_id(),
next_per_commitment_point,
short_channel_id_alias: Some(self.outbound_scid_alias),
self.monitor_pending_commitment_signed = false;
return MonitorRestoreUpdates {
raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
- accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked, announcement_sigs
+ accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
};
}
if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
MonitorRestoreUpdates {
- raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, funding_locked, announcement_sigs
+ raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
}
}
- pub fn update_fee<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
+ pub fn update_fee<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
where F::Target: FeeEstimator
{
if self.is_outbound() {
/// May panic if some calls other than message-handling calls (which will all Err immediately)
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
+ ///
+ /// Some links printed in log lines are included here to check them during build (when run with
+ /// `cargo doc --document-private-items`):
+ /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
+ /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L,
node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock)
-> Result<ReestablishResponses, ChannelError> where L::Target: Logger {
return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
}
if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
- return Err(ChannelError::CloseDelayBroadcast(
- "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting".to_owned()
- ));
+ macro_rules! log_and_panic {
+ ($err_msg: expr) => {
+ log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+ panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+ }
+ }
+ log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+ This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+ More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+ If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+ ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+ ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+ Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+ See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
}
},
OptionalField::Absent => {}
let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, best_block.height(), logger);
if self.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
- // If we're waiting on a monitor update, we shouldn't re-send any funding_locked's.
- if self.channel_state & (ChannelState::OurFundingLocked as u32) == 0 ||
+ // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
+ if self.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
if msg.next_remote_commitment_number != 0 {
- return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent funding_locked yet".to_owned()));
+ return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
}
// Short circuit the whole handler as there is nothing we can resend them
return Ok(ReestablishResponses {
- funding_locked: None,
+ channel_ready: None,
raa: None, commitment_update: None, mon_update: None,
order: RAACommitmentOrder::CommitmentFirst,
holding_cell_failed_htlcs: Vec::new(),
});
}
- // We have OurFundingLocked set!
+ // We have OurChannelReady set!
let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
return Ok(ReestablishResponses {
- funding_locked: Some(msgs::FundingLocked {
+ channel_ready: Some(msgs::ChannelReady {
channel_id: self.channel_id(),
next_per_commitment_point,
short_channel_id_alias: Some(self.outbound_scid_alias),
let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
// Remote isn't waiting on any RevokeAndACK from us!
- // Note that if we need to repeat our FundingLocked we'll do that in the next if block.
+ // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
None
} else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.cur_holder_commitment_transaction_number {
if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
// the corresponding revoke_and_ack back yet.
let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 };
- let funding_locked = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
- // We should never have to worry about MonitorUpdateFailed resending FundingLocked
+ let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
+ // We should never have to worry about MonitorUpdateFailed resending ChannelReady
let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- Some(msgs::FundingLocked {
+ Some(msgs::ChannelReady {
channel_id: self.channel_id(),
next_per_commitment_point,
short_channel_id_alias: Some(self.outbound_scid_alias),
// now!
match self.free_holding_cell_htlcs(logger) {
Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
- Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) =>
+ Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
Ok(ReestablishResponses {
- funding_locked, shutdown_msg, announcement_sigs,
+ channel_ready, shutdown_msg, announcement_sigs,
raa: required_revoke,
commitment_update: Some(commitment_update),
order: self.resend_order.clone(),
},
Ok((None, holding_cell_failed_htlcs)) => {
Ok(ReestablishResponses {
- funding_locked, shutdown_msg, announcement_sigs,
+ channel_ready, shutdown_msg, announcement_sigs,
raa: required_revoke,
commitment_update: None,
order: self.resend_order.clone(),
}
} else {
Ok(ReestablishResponses {
- funding_locked, shutdown_msg, announcement_sigs,
+ channel_ready, shutdown_msg, announcement_sigs,
raa: required_revoke,
commitment_update: None,
order: self.resend_order.clone(),
if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
self.monitor_pending_commitment_signed = true;
Ok(ReestablishResponses {
- funding_locked, shutdown_msg, announcement_sigs,
+ channel_ready, shutdown_msg, announcement_sigs,
commitment_update: None, raa: None, mon_update: None,
order: self.resend_order.clone(),
holding_cell_failed_htlcs: Vec::new(),
})
} else {
Ok(ReestablishResponses {
- funding_locked, shutdown_msg, announcement_sigs,
+ channel_ready, shutdown_msg, announcement_sigs,
raa: required_revoke,
commitment_update: Some(self.get_last_commitment_update(logger)),
order: self.resend_order.clone(),
/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
/// at which point they will be recalculated.
- fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &F) -> (u64, u64)
+ fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+ -> (u64, u64)
where F::Target: FeeEstimator
{
if let Some((min, max)) = self.closing_fee_limits { return (min, max); }
// Propose a range from our current Background feerate to our Normal feerate plus our
// force_close_avoidance_max_fee_satoshis.
// If we fail to come to consensus, we'll have to force-close.
- let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
- let normal_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
+ let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() };
// The spec requires that (when the channel does not have anchors) we only send absolute
// We always add force_close_avoidance_max_fee_satoshis to our normal
// feerate-calculated fee, but allow the max to be overridden if we're using a
// target feerate-calculated fee.
- cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.config.force_close_avoidance_max_fee_satoshis,
+ cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.config.options.force_close_avoidance_max_fee_satoshis,
proposed_max_feerate as u64 * tx_weight / 1000)
} else {
self.channel_value_satoshis - (self.value_to_self_msat + 999) / 1000
Ok(())
}
- pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(&mut self, fee_estimator: &F, logger: &L)
+ pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
where F::Target: FeeEstimator, L::Target: Logger
{
tx
}
- pub fn closing_signed<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::ClosingSigned) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
+ pub fn closing_signed<F: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
where F::Target: FeeEstimator
{
if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
&self.channel_type
}
- /// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
+ /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
/// is_usable() returns true).
/// Allowed in any state (including after shutdown)
pub fn get_short_channel_id(&self) -> Option<u64> {
}
pub fn get_fee_proportional_millionths(&self) -> u32 {
- self.config.forwarding_fee_proportional_millionths
+ // Channel options now live behind `config.options` — presumably the mutable
+ // `ChannelConfig` inside the new `LegacyChannelConfig` wrapper (see imports); TODO confirm.
+ self.config.options.forwarding_fee_proportional_millionths
}
pub fn get_cltv_expiry_delta(&self) -> u16 {
- cmp::max(self.config.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
+ // Never advertise less than MIN_CLTV_EXPIRY_DELTA, whatever the user configured.
+ cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
}
pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
- self.config.max_dust_htlc_exposure_msat
+ // Read through `config.options`, matching the other accessors updated in this change.
+ self.config.options.max_dust_htlc_exposure_msat
+ }
+
+ /// Returns the [`ChannelConfig`] that applied to this channel before the most recent
+ /// update, if one is still being tracked.
+ pub fn prev_config(&self) -> Option<ChannelConfig> {
+ 	match self.prev_config {
+ 		Some((prev_options, _ticks_elapsed)) => Some(prev_options),
+ 		None => None,
+ 	}
+ }
+
+ /// Advances the tick counter tracking how long ago the previous [`ChannelConfig`] was
+ /// replaced. Once [`EXPIRE_PREV_CONFIG_TICKS`] ticks have elapsed the previous config is
+ /// dropped and will no longer be considered when forwarding HTLCs.
+ pub fn maybe_expire_prev_config(&mut self) {
+ 	let expired = if let Some(prev_config) = self.prev_config.as_mut() {
+ 		prev_config.1 += 1;
+ 		prev_config.1 == EXPIRE_PREV_CONFIG_TICKS
+ 	} else {
+ 		false
+ 	};
+ 	if expired {
+ 		self.prev_config = None;
+ 	}
+ }
+
+ /// Returns the current [`ChannelConfig`] applied to the channel.
+ pub fn config(&self) -> ChannelConfig {
+ // Returned by value: callers receive a snapshot of the current options, not a reference
+ // into the channel's state.
+ self.config.options
+ }
+
+ /// Updates the channel's config, always overwriting the stored options with `config`.
+ /// Returns `true` when a forwarding-relevant option (proportional fee, base fee, or CLTV
+ /// delta) changed — i.e. when a fresh `ChannelUpdate` results — in which case the old
+ /// options are retained as the previous config for a grace period.
+ pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
+ let did_channel_update =
+ self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
+ self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
+ self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
+ if did_channel_update {
+ // Save the outgoing options (with a fresh tick count of 0) *before* overwriting them,
+ // so in-flight HTLCs built against the old parameters can still be honored.
+ self.prev_config = Some((self.config.options, 0));
+ // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
+ // policy change to propagate throughout the network.
+ self.update_time_counter += 1;
+ }
+ self.config.options = *config;
+ did_channel_update
+ }
+
+ /// Checks that the incoming HTLC `htlc`, when forwarded with value `amt_to_forward` and
+ /// expiry `outgoing_cltv_value`, pays at least the fee and CLTV delta demanded by `config`.
+ /// On failure, returns the error message and failure code to relay back to the sender.
+ fn internal_htlc_satisfies_config(
+ &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
+ ) -> Result<(), (&'static str, u16)> {
+ // Required fee = proportional part (ppm of the forwarded amount) + base fee, with
+ // checked arithmetic — `None` means the computation overflowed, which we treat as a
+ // fee failure below.
+ let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
+ .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
+ // Reject if the fee overflowed, the HTLC doesn't even cover the fee, or what's left
+ // after taking our fee is less than the amount we're asked to forward.
+ if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
+ (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
+ return Err((
+ "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
+ 0x1000 | 12, // fee_insufficient
+ ));
+ }
+ // The incoming expiry must leave us at least our configured CLTV delta of headroom
+ // over the outgoing expiry (compared in u64 to avoid overflow at the u32 boundary).
+ if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
+ return Err((
+ "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+ 0x1000 | 13, // incorrect_cltv_expiry
+ ));
+ }
+ Ok(())
+ }
+
+ /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
+ /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
+ /// unsuccessful, falls back to the previous one if one exists.
+ pub fn htlc_satisfies_config(
+ &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
+ ) -> Result<(), (&'static str, u16)> {
+ self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.config())
+ .or_else(|err| {
+ // The sender may have built this HTLC against our previous config before our
+ // update propagated; accept it if the (not-yet-expired) previous config is met.
+ if let Some(prev_config) = self.prev_config() {
+ self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
+ } else {
+ // No previous config tracked — surface the failure against the current config.
+ Err(err)
+ }
+ })
}
pub fn get_feerate(&self) -> u32 {
/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
/// Allowed in any state (including after shutdown)
pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
- self.config.forwarding_fee_base_msat
+ // Read through `config.options`, matching the other accessors updated in this change.
+ self.config.options.forwarding_fee_base_msat
}
/// Returns true if we've ever received a message from the remote end for this Channel
/// Allowed in any state (including after shutdown)
pub fn is_usable(&self) -> bool {
let mask = ChannelState::ChannelFunded as u32 | BOTH_SIDES_SHUTDOWN_MASK;
- (self.channel_state & mask) == (ChannelState::ChannelFunded as u32) && !self.monitor_pending_funding_locked
+ // Usable only when funded with no shutdown flags set, and we aren't still holding back
+ // our channel_ready behind a pending monitor update.
+ (self.channel_state & mask) == (ChannelState::ChannelFunded as u32) && !self.monitor_pending_channel_ready
}
/// Returns true if this channel is currently available for use. This is a superset of
self.channel_state >= ChannelState::FundingSent as u32
}
+ /// Returns true if we have sent our `channel_ready`: either the OurChannelReady flag is
+ /// set, or the channel has already advanced to (or past) ChannelFunded.
+ pub fn is_our_channel_ready(&self) -> bool {
+ 	if self.channel_state >= ChannelState::ChannelFunded as u32 {
+ 		return true;
+ 	}
+ 	self.channel_state & (ChannelState::OurChannelReady as u32) != 0
+ }
+
/// Returns true if our peer has either initiated or agreed to shut down the channel.
pub fn received_shutdown(&self) -> bool {
(self.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
self.channel_update_status = status;
}
- fn check_get_funding_locked(&mut self, height: u32) -> Option<msgs::FundingLocked> {
- if self.funding_tx_confirmation_height == 0 {
+ fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+ if self.funding_tx_confirmation_height == 0 && self.minimum_depth != Some(0) {
return None;
}
let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.channel_state |= ChannelState::OurFundingLocked as u32;
+ self.channel_state |= ChannelState::OurChannelReady as u32;
true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
self.update_time_counter += 1;
true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
// We got a reorg but not enough to trigger a force close, just ignore.
false
- } else if self.channel_state < ChannelState::ChannelFunded as u32 {
- panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
} else {
+ if self.channel_state < ChannelState::ChannelFunded as u32 {
+ // We should never see a funding transaction on-chain until we've received
+ // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
+ // an inbound channel - before that we have no known funding TXID). The fuzzer,
+ // however, may do this and we shouldn't treat it as a bug.
+ #[cfg(not(fuzzing))]
+ panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
+ Do NOT broadcast a funding transaction manually - let LDK do it for you!",
+ self.channel_state);
+ }
// We got a reorg but not enough to trigger a force close, just ignore.
false
};
if self.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
let next_per_commitment_point =
self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.secp_ctx);
- return Some(msgs::FundingLocked {
+ return Some(msgs::ChannelReady {
channel_id: self.channel_id,
next_per_commitment_point,
short_channel_id_alias: Some(self.outbound_scid_alias),
});
}
} else {
- self.monitor_pending_funding_locked = true;
+ self.monitor_pending_channel_ready = true;
}
}
None
/// In the second, we simply return an Err indicating we need to be force-closed now.
pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32,
txdata: &TransactionData, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
- -> Result<(Option<msgs::FundingLocked>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
- let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
+ -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
if let Some(funding_txo) = self.get_funding_txo() {
for &(index_in_block, tx) in txdata.iter() {
- // If we haven't yet sent a funding_locked, but are in FundingSent (ignoring
- // whether they've sent a funding_locked or not), check if we should send one.
- if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
+ // Check if the transaction is the expected funding transaction, and if it is,
+ // check that it pays the right amount to the right script.
+ if self.funding_tx_confirmation_height == 0 {
if tx.txid() == funding_txo.txid {
let txo_idx = funding_txo.index as usize;
if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
}
}
}
- // If we allow 1-conf funding, we may need to check for funding_locked here and
+ // If we allow 1-conf funding, we may need to check for channel_ready here and
// send it immediately instead of waiting for a best_block_updated call (which
// may have already happened for this block).
- if let Some(funding_locked) = self.check_get_funding_locked(height) {
- log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
+ if let Some(channel_ready) = self.check_get_channel_ready(height) {
+ log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id));
let announcement_sigs = self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger);
- return Ok((Some(funding_locked), announcement_sigs));
+ return Ok((Some(channel_ready), announcement_sigs));
}
}
for inp in tx.input.iter() {
/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
/// back.
pub fn best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash, node_pk: PublicKey, logger: &L)
- -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
+ -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_pk)), logger)
}
fn do_best_block_updated<L: Deref>(&mut self, height: u32, highest_header_time: u32, genesis_node_pk: Option<(BlockHash, PublicKey)>, logger: &L)
- -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
+ -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason> where L::Target: Logger {
let mut timed_out_htlcs = Vec::new();
// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
// forward an HTLC when our counterparty should almost certainly just fail it for expiring
self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
- if let Some(funding_locked) = self.check_get_funding_locked(height) {
+ if let Some(channel_ready) = self.check_get_channel_ready(height) {
let announcement_sigs = if let Some((genesis_block_hash, node_pk)) = genesis_node_pk {
self.get_announcement_sigs(node_pk, genesis_block_hash, height, logger)
} else { None };
- log_info!(logger, "Sending a funding_locked to our peer for channel {}", log_bytes!(self.channel_id));
- return Ok((Some(funding_locked), timed_out_htlcs, announcement_sigs));
+ log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.channel_id));
+ return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
}
let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
if non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
- (non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32 {
+ (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
if self.funding_tx_confirmation_height == 0 {
- // Note that check_get_funding_locked may reset funding_tx_confirmation_height to
+ // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
// zero if it has been reorged out, however in either case, our state flags
- // indicate we've already sent a funding_locked
+ // indicate we've already sent a channel_ready
funding_tx_confirmations = 0;
}
- // If we've sent funding_locked (or have both sent and received funding_locked), and
+ // If we've sent channel_ready (or have both sent and received channel_ready), and
// the funding transaction has become unconfirmed,
// close the channel and hope we can get the latest state on chain (because presumably
// the funding transaction is at least still in the mempool of most nodes).
//
- // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf channel,
- // but not doing so may lead to the `ChannelManager::short_to_id` map being
- // inconsistent, so we currently have to.
+ // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
+ // 0-conf channel, but not doing so may lead to the
+ // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
+ // to.
if funding_tx_confirmations == 0 && self.funding_tx_confirmed_in.is_some() {
let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
self.minimum_depth.unwrap(), funding_tx_confirmations);
log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.channel_id));
// If funding_tx_confirmed_in is unset, the channel must not be active
assert!(non_shutdown_state <= ChannelState::ChannelFunded as u32);
- assert_eq!(non_shutdown_state & ChannelState::OurFundingLocked as u32, 0);
+ assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
return Err(ClosureReason::FundingTimedOut);
}
/// Indicates the funding transaction is no longer confirmed in the main chain. This may
/// force-close the channel, but may also indicate a harmless reorganization of a block or two
- /// before the channel has reached funding_locked and we can just wait for more blocks.
+ /// before the channel has reached channel_ready and we can just wait for more blocks.
pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
if self.funding_tx_confirmation_height != 0 {
// We handle the funding disconnection by calling best_block_updated with a height one
// time we saw and it will be ignored.
let best_time = self.update_time_counter;
match self.do_best_block_updated(reorg_height, best_time, None, logger) {
- Ok((funding_locked, timed_out_htlcs, announcement_sigs)) => {
- assert!(funding_locked.is_none(), "We can't generate a funding with 0 confirmations?");
+ Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
+ assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
Ok(())
self.inbound_awaiting_accept
}
+ /// Sets this channel to accepting 0conf, must be done before `get_accept_channel`
+ ///
+ /// Overrides the `minimum_depth` we will advertise in our `accept_channel` message to
+ /// zero, allowing the channel to be used without waiting for the funding transaction
+ /// to confirm.
+ pub fn set_0conf(&mut self) {
+ // Only valid while the inbound channel is still awaiting acceptance; calling this
+ // after acceptance would advertise a depth we never sent.
+ assert!(self.inbound_awaiting_accept);
+ self.minimum_depth = Some(0);
+ }
+
/// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
/// should be sent back to the counterparty node.
///
}
/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
- /// announceable and available for use (have exchanged FundingLocked messages in both
+ /// announceable and available for use (have exchanged ChannelReady messages in both
/// directions). Should be used for both broadcasted announcements and in response to an
/// AnnouncementSignatures message from the remote peer.
///
commitment_txid: counterparty_commitment_txid,
htlc_outputs: htlcs.clone(),
commitment_number: self.cur_counterparty_commitment_transaction_number,
- their_revocation_point: self.counterparty_cur_commitment_point.unwrap()
+ their_per_commitment_point: self.counterparty_cur_commitment_point.unwrap()
}]
};
self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) {
+ pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
// Note that we MUST only generate a monitor update that indicates force-closure - we're
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, its likely any monitor events we generate will
// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
// return them to fail the payment.
let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+ let counterparty_node_id = self.get_counterparty_node_id();
for htlc_update in self.holding_cell_htlc_updates.drain(..) {
match htlc_update {
HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
- dropped_outbound_htlcs.push((source, payment_hash));
+ dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
},
_ => {}
}
}
const SERIALIZATION_VERSION: u8 = 2;
-const MIN_SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 2;
impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
(0, FailRelay),
self.user_id.write(writer)?;
- // Write out the old serialization for the config object. This is read by version-1
- // deserializers, but we will read the version in the TLV at the end instead.
- self.config.forwarding_fee_proportional_millionths.write(writer)?;
- self.config.cltv_expiry_delta.write(writer)?;
- self.config.announced_channel.write(writer)?;
- self.config.commit_upfront_shutdown_pubkey.write(writer)?;
+ // Version 1 deserializers expected to read parts of the config object here. Version 2
+ // deserializers (0.0.99) now read config through TLVs, and as we now require them for
+ // `minimum_depth` we simply write dummy values here.
+ writer.write_all(&[0; 8])?;
self.channel_id.write(writer)?;
(self.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
}
- self.monitor_pending_funding_locked.write(writer)?;
+ self.monitor_pending_channel_ready.write(writer)?;
self.monitor_pending_revoke_and_ack.write(writer)?;
self.monitor_pending_commitment_signed.write(writer)?;
if self.holder_selected_channel_reserve_satoshis != Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis)
{ Some(self.holder_selected_channel_reserve_satoshis) } else { None };
- let mut old_max_in_flight_percent_config = UserConfig::default().own_channel_config;
+ let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
let serialized_holder_htlc_max_in_flight =
if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config)
let user_id = Readable::read(reader)?;
- let mut config = Some(ChannelConfig::default());
+ let mut config = Some(LegacyChannelConfig::default());
if ver == 1 {
// Read the old serialization of the ChannelConfig from version 0.0.98.
- config.as_mut().unwrap().forwarding_fee_proportional_millionths = Readable::read(reader)?;
- config.as_mut().unwrap().cltv_expiry_delta = Readable::read(reader)?;
+ config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
+ config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
} else {
_ => return Err(DecodeError::InvalidValue),
};
- let monitor_pending_funding_locked = Readable::read(reader)?;
+ let monitor_pending_channel_ready = Readable::read(reader)?;
let monitor_pending_revoke_and_ack = Readable::read(reader)?;
let monitor_pending_commitment_signed = Readable::read(reader)?;
let mut target_closing_feerate_sats_per_kw = None;
let mut monitor_pending_finalized_fulfills = Some(Vec::new());
let mut holder_selected_channel_reserve_satoshis = Some(Self::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
- let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().own_channel_config));
+ let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
// Prior to supporting channel type negotiation, all of our channels were static_remotekey
// only, so we default to that if none was written.
let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
config: config.unwrap(),
+ prev_config: None,
+
// Note that we don't care about serializing handshake limits as we only ever serialize
// channel data after the handshake has completed.
inbound_handshake_limits_override: None,
resend_order,
- monitor_pending_funding_locked,
+ monitor_pending_channel_ready,
monitor_pending_revoke_and_ack,
monitor_pending_commitment_signed,
monitor_pending_forwards,
use ln::channelmanager::{HTLCSource, PaymentId};
use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS};
- use ln::features::InitFeatures;
- use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
+ use ln::features::{InitFeatures, ChannelTypeFeatures};
+ use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
use ln::script::ShutdownScript;
use ln::chan_utils;
use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
use chain::BestBlock;
- use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
+ use chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
use chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial, KeysInterface};
use chain::transaction::OutPoint;
use util::config::UserConfig;
fn test_no_fee_check_overflow() {
// Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
// arithmetic, causing a panic with debug assertions enabled.
- assert!(Channel::<InMemorySigner>::check_remote_fee(&&TestFeeEstimator { fee_est: 42 }, u32::max_value()).is_err());
+ let fee_est = TestFeeEstimator { fee_est: 42 };
+ let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
+ assert!(Channel::<InMemorySigner>::check_remote_fee(&bounded_fee_estimator, u32::max_value()).is_err());
}
struct Keys {
returns: non_v0_segwit_shutdown_script.clone(),
});
- let fee_estimator = TestFeeEstimator { fee_est: 253 };
let secp_ctx = Secp256k1::new();
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- match Channel::<EnforcingSigner>::new_outbound(&&fee_estimator, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
+ match Channel::<EnforcingSigner>::new_outbound(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
Err(APIError::IncompatibleShutdownScript { script }) => {
assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
},
fn test_open_channel_msg_fee() {
let original_fee = 253;
let mut fee_est = TestFeeEstimator{fee_est: original_fee };
+ let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&bounded_fee_estimator, &&keys_provider, node_a_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Now change the fee so we can check that the fee in the open_channel message is the
// same as the old fee.
fn test_holder_vs_counterparty_dust_limit() {
// Test that when calculating the local and remote commitment transaction fees, the correct
// dust limits are used.
- let feeest = TestFeeEstimator{fee_est: 15000};
+ let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
// Make sure A's dust limit is as we expect.
let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+ let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
accept_channel_msg.dust_limit_satoshis = 546;
- node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
+ node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &InitFeatures::known()).unwrap();
node_a_chan.holder_dust_limit_satoshis = 1560;
// Put some inbound and outbound HTLCs in A's channel.
// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
- let fee_est = TestFeeEstimator{fee_est: 253 };
+ let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let mut chan = Channel::<EnforcingSigner>::new_outbound(&fee_est, &&keys_provider, node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
let commitment_tx_fee_0_htlcs = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.feerate_per_kw, 0, chan.opt_anchors());
let commitment_tx_fee_1_htlc = Channel::<EnforcingSigner>::commit_tx_fee_msat(chan.feerate_per_kw, 1, chan.opt_anchors());
#[test]
fn channel_reestablish_no_updates() {
- let feeest = TestFeeEstimator{fee_est: 15000};
+ let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+ let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
// Node B --> Node A: accept channel
let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
- node_a_chan.accept_channel(&accept_channel_msg, &config.peer_channel_config_limits, &InitFeatures::known()).unwrap();
+ node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &InitFeatures::known()).unwrap();
// Node A --> Node B: funding created
let output_script = node_a_chan.get_funding_redeemscript();
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
- let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&logger).unwrap();
+ let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&logger).unwrap();
// Node B --> Node A: funding signed
let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&logger);
#[test]
fn test_configured_holder_max_htlc_value_in_flight() {
- let feeest = TestFeeEstimator{fee_est: 15000};
+ let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut config_2_percent = UserConfig::default();
- config_2_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
+ config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
let mut config_99_percent = UserConfig::default();
- config_99_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
+ config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
let mut config_0_percent = UserConfig::default();
- config_0_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
+ config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
let mut config_101_percent = UserConfig::default();
- config_101_percent.own_channel_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
+ config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
// Test that `new_outbound` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
- let chan_1 = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
+ let chan_1 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
let chan_1_value_msat = chan_1.channel_value_satoshis * 1000;
assert_eq!(chan_1.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
// Test with the upper bound - 1 of valid values (99%).
- let chan_2 = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
+ let chan_2 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
let chan_2_value_msat = chan_2.channel_value_satoshis * 1000;
assert_eq!(chan_2.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
// Test that `new_from_req` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound - 1 (2%) of the `channel_value`.
- let chan_3 = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
+ let chan_3 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
let chan_3_value_msat = chan_3.channel_value_satoshis * 1000;
assert_eq!(chan_3.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
// Test with the upper bound - 1 of valid values (99%).
- let chan_4 = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
+ let chan_4 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
let chan_4_value_msat = chan_4.channel_value_satoshis * 1000;
assert_eq!(chan_4.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
// Test that `new_outbound` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
- let chan_5 = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
+ let chan_5 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
let chan_5_value_msat = chan_5.channel_value_satoshis * 1000;
assert_eq!(chan_5.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
// Test that `new_outbound` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
- let chan_6 = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
+ let chan_6 = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, outbound_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
let chan_6_value_msat = chan_6.channel_value_satoshis * 1000;
assert_eq!(chan_6.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
// Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
- let chan_7 = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
+ let chan_7 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
let chan_7_value_msat = chan_7.channel_value_satoshis * 1000;
assert_eq!(chan_7.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
// Test that `new_from_req` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
- let chan_8 = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
+ let chan_8 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
let chan_8_value_msat = chan_8.channel_value_satoshis * 1000;
assert_eq!(chan_8.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}
#[test]
fn channel_update() {
- let feeest = TestFeeEstimator{fee_est: 15000};
+ let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
// Create a channel.
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
+ let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider, node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
assert!(node_a_chan.counterparty_forwarding_info.is_none());
assert_eq!(node_a_chan.holder_htlc_minimum_msat, 1); // the default
assert!(node_a_chan.counterparty_forwarding_info().is_none());
flags: 0,
cltv_expiry_delta: 100,
htlc_minimum_msat: 5,
- htlc_maximum_msat: OptionalField::Absent,
+ htlc_maximum_msat: MAX_VALUE_MSAT,
fee_base_msat: 110,
fee_proportional_millionths: 11,
excess_data: Vec::new(),
let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut config = UserConfig::default();
- config.channel_options.announced_channel = false;
- let mut chan = Channel::<InMemorySigner>::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, &InitFeatures::known(), 10_000_000, 100000, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
+ config.channel_handshake_config.announced_channel = false;
+ let mut chan = Channel::<InMemorySigner>::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, counterparty_node_id, &InitFeatures::known(), 10_000_000, 100000, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
chan.holder_dust_limit_satoshis = 546;
chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret).unwrap(),
SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
}
+
+ #[test]
+ fn test_zero_conf_channel_type_support() {
+ // Verifies that an inbound open_channel whose `channel_type` carries the
+ // zero-conf-required feature bit is accepted by `new_from_req`.
+ let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
+ let secp_ctx = Secp256k1::new();
+ let seed = [42; 32];
+ let network = Network::Testnet;
+ let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+ let logger = test_utils::TestLogger::new();
+
+ // Node A builds an ordinary outbound channel so we can harvest its open_channel msg.
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let config = UserConfig::default();
+ let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&feeest, &&keys_provider,
+ node_b_node_id, &InitFeatures::known(), 10000000, 100000, 42, &config, 0, 42).unwrap();
+
+ // Request static_remote_key plus the zero-conf feature bit in the channel type.
+ let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
+ channel_type_features.set_zero_conf_required();
+
+ // Splice the zero-conf channel type into A's open_channel before handing it to B.
+ let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+ open_channel_msg.channel_type = Some(channel_type_features);
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+ // B (the inbound side) should accept the request rather than rejecting the type.
+ let res = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider,
+ node_b_node_id, &InitFeatures::known(), &open_channel_msg, 7, &config, 0, &&logger, 42);
+ assert!(res.is_ok());
+ }
}