X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=33a46084abb4d1bba4bf3de72168c4d1b3933b7d;hb=d0f0d9c19f86e5056bbb768e06c6d5f2fb6b925d;hp=35aeb3e5540f7abf022b9015918571237726d493;hpb=0b196ebae6b9b1f861c2fc2ceddcc128b9f46f18;p=rust-lightning diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 35aeb3e5..33a46084 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -7,6 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. +use bitcoin::blockdata::constants::ChainHash; use bitcoin::blockdata::script::{Script,Builder}; use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType}; use bitcoin::util::sighash; @@ -22,7 +23,7 @@ use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature}; use bitcoin::secp256k1; -use crate::ln::{PaymentPreimage, PaymentHash}; +use crate::ln::{ChannelId, PaymentPreimage, PaymentHash}; use crate::ln::features::{ChannelTypeFeatures, InitFeatures}; use crate::ln::msgs; use crate::ln::msgs::DecodeError; @@ -35,7 +36,7 @@ use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; +use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; use crate::events::ClosureReason; use crate::routing::gossip::NodeId; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter}; @@ -51,6 +52,7 @@ use core::ops::Deref; #[cfg(any(test, fuzzing, debug_assertions))] use crate::sync::Mutex; use bitcoin::hashes::hex::ToHex; +use crate::sign::type_resolver::ChannelSignerType; #[cfg(test)] pub struct ChannelValueStat { @@ -65,6 +67,8 @@ pub struct ChannelValueStat { } pub struct AvailableBalances { + /// The amount that would go to us if we close the channel, ignoring any on-chain fees. + pub balance_msat: u64, /// Total amount available for our counterparty to send to us. pub inbound_capacity_msat: u64, /// Total amount available for us to send to our counterparty. @@ -299,9 +303,24 @@ enum ChannelState { /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about /// to drop us, but we store this anyway. ShutdownComplete = 4096, + /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the + /// broadcasting of the funding transaction is being held until all channels in the batch + /// have received funding_signed and have their monitors persisted. 
+ WaitingForBatch = 1 << 13, } -const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32; -const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32; +const BOTH_SIDES_SHUTDOWN_MASK: u32 = + ChannelState::LocalShutdownSent as u32 | + ChannelState::RemoteShutdownSent as u32; +const MULTI_STATE_FLAGS: u32 = + BOTH_SIDES_SHUTDOWN_MASK | + ChannelState::PeerDisconnected as u32 | + ChannelState::MonitorUpdateInProgress as u32; +const STATE_FLAGS: u32 = + MULTI_STATE_FLAGS | + ChannelState::TheirChannelReady as u32 | + ChannelState::OurChannelReady as u32 | + ChannelState::AwaitingRemoteRevoke as u32 | + ChannelState::WaitingForBatch as u32; pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; @@ -524,15 +543,17 @@ pub(super) struct ReestablishResponses { pub shutdown_msg: Option, } -/// The return type of `force_shutdown` -/// -/// Contains a (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple -/// followed by a list of HTLCs to fail back in the form of the (source, payment hash, and this -/// channel's counterparty_node_id and channel_id). -pub(crate) type ShutdownResult = ( - Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, - Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])> -); +/// The result of a shutdown that should be handled. +#[must_use] +pub(crate) struct ShutdownResult { + /// A channel monitor update to apply. + pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, + /// A list of dropped outbound HTLCs that can safely be failed backwards immediately. + pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>, + /// An unbroadcasted batch funding transaction id. The closure of this channel should be + /// propagated to the remainder of the batch. + pub(crate) unbroadcasted_batch_funding_txid: Option, +} /// If the majority of the channels funds are to the fundee and the initiator holds only just /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the @@ -593,6 +614,9 @@ pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2; /// exceeding this age limit will be force-closed and purged from memory. pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60; +/// Number of blocks needed for an output from a coinbase transaction to be spendable. +pub(crate) const COINBASE_MATURITY: u32 = 100; + struct PendingChannelMonitorUpdate { update: ChannelMonitorUpdate, } @@ -601,6 +625,35 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, { (0, update, required), }); +/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of +/// its variants containing an appropriate channel struct. 
+pub(super) enum ChannelPhase where SP::Target: SignerProvider { + UnfundedOutboundV1(OutboundV1Channel), + UnfundedInboundV1(InboundV1Channel), + Funded(Channel), +} + +impl<'a, SP: Deref> ChannelPhase where + SP::Target: SignerProvider, + ::Signer: ChannelSigner, +{ + pub fn context(&'a self) -> &'a ChannelContext { + match self { + ChannelPhase::Funded(chan) => &chan.context, + ChannelPhase::UnfundedOutboundV1(chan) => &chan.context, + ChannelPhase::UnfundedInboundV1(chan) => &chan.context, + } + } + + pub fn context_mut(&'a mut self) -> &'a mut ChannelContext { + match self { + ChannelPhase::Funded(ref mut chan) => &mut chan.context, + ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context, + ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context, + } + } +} + /// Contains all state common to unfunded inbound/outbound channels. pub(super) struct UnfundedChannelContext { /// A counter tracking how many ticks have elapsed since this unfunded channel was @@ -624,7 +677,7 @@ impl UnfundedChannelContext { } /// Contains everything about the channel including state, and various flags. -pub(super) struct ChannelContext { +pub(super) struct ChannelContext where SP::Target: SignerProvider { config: LegacyChannelConfig, // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were @@ -636,8 +689,11 @@ pub(super) struct ChannelContext { user_id: u128, - channel_id: [u8; 32], - temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115. + /// The current channel ID. + channel_id: ChannelId, + /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID. + /// Will be `None` for channels created prior to 0.0.115. + temporary_channel_id: Option, channel_state: u32, // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to @@ -657,7 +713,7 @@ pub(super) struct ChannelContext { latest_monitor_update_id: u64, - holder_signer: Signer, + holder_signer: ChannelSignerType<::Signer>, shutdown_scriptpubkey: Option, destination_script: Script, @@ -667,7 +723,7 @@ pub(super) struct ChannelContext { cur_holder_commitment_transaction_number: u64, cur_counterparty_commitment_transaction_number: u64, - value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees + value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs pending_inbound_htlcs: Vec, pending_outbound_htlcs: Vec, holding_cell_htlc_updates: Vec, @@ -785,6 +841,7 @@ pub(super) struct ChannelContext { pub(crate) channel_transaction_parameters: ChannelTransactionParameters, funding_transaction: Option, + is_batch_funding: Option<()>, counterparty_cur_commitment_point: Option, counterparty_prev_commitment_point: Option, @@ -883,7 +940,7 @@ pub(super) struct ChannelContext { blocked_monitor_updates: Vec, } -impl ChannelContext { +impl ChannelContext where SP::Target: SignerProvider { /// Allowed in any state (including after shutdown) pub fn get_update_time_counter(&self) -> u32 { self.update_time_counter @@ -909,7 +966,7 @@ impl ChannelContext { /// Returns true if we've ever received a message from the remote end for this Channel pub fn have_received_message(&self) -> bool { - self.channel_state > (ChannelState::OurInitSent as u32) + self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32) } /// Returns true if this channel is fully established and not known to be closing. 
@@ -956,14 +1013,14 @@ impl ChannelContext { // Public utilities: - pub fn channel_id(&self) -> [u8; 32] { + pub fn channel_id(&self) -> ChannelId { self.channel_id } // Return the `temporary_channel_id` used during channel establishment. // // Will return `None` for channels created prior to LDK version 0.0.115. - pub fn temporary_channel_id(&self) -> Option<[u8; 32]> { + pub fn temporary_channel_id(&self) -> Option { self.temporary_channel_id } @@ -1111,7 +1168,7 @@ impl ChannelContext { match self.config.options.max_dust_htlc_exposure { MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => { let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight( - ConfirmationTarget::HighPriority); + ConfirmationTarget::OnChainSweep); feerate_per_kw as u64 * multiplier }, MaxDustHTLCExposure::FixedLimitMsat(limit) => limit, @@ -1125,7 +1182,7 @@ impl ChannelContext { // Checks whether we should emit a `ChannelPending` event. pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool { - self.is_funding_initiated() && !self.channel_pending_event_emitted + self.is_funding_broadcast() && !self.channel_pending_event_emitted } // Returns whether we already emitted a `ChannelPending` event. @@ -1184,9 +1241,11 @@ impl ChannelContext { did_channel_update } - /// Returns true if funding_created was sent/received. - pub fn is_funding_initiated(&self) -> bool { - self.channel_state >= ChannelState::FundingSent as u32 + /// Returns true if funding_signed was sent/received and the + /// funding transaction has been broadcast if necessary. + pub fn is_funding_broadcast(&self) -> bool { + self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && + self.channel_state & ChannelState::WaitingForBatch as u32 == 0 } /// Transaction nomenclature is somewhat confusing here as there are many different cases - a @@ -1231,7 +1290,8 @@ impl ChannelContext { log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), - log_bytes!(self.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); + &self.channel_id, + if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); macro_rules! 
get_htlc_in_commitment { ($htlc: expr, $offered: expr) => { @@ -1255,10 +1315,10 @@ impl ChannelContext { feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000 }; if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { - log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat); included_non_dust_htlcs.push((htlc_in_tx, $source)); } else { - log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat); included_dust_htlcs.push((htlc_in_tx, $source)); } } else { @@ -1269,10 +1329,10 @@ impl ChannelContext { feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000 }; if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee { - log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat); included_non_dust_htlcs.push((htlc_in_tx, $source)); } else { - log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat); + log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat); included_dust_htlcs.push((htlc_in_tx, $source)); } } @@ -1292,7 +1352,7 @@ impl ChannelContext { add_htlc_output!(htlc, false, None, state_name); remote_htlc_total_msat += htlc.amount_msat; } else { - log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); + log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name); match &htlc.state { &InboundHTLCState::LocalRemoved(ref reason) => { if generated_by_local { @@ -1332,7 +1392,7 @@ impl ChannelContext { add_htlc_output!(htlc, true, Some(&htlc.source), state_name); local_htlc_total_msat += htlc.amount_msat; } else { - log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, log_bytes!(htlc.payment_hash.0), htlc.amount_msat, state_name); + log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name); match htlc.state { 
OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => { value_to_self_msat_offset -= htlc.amount_msat as i64; @@ -1442,7 +1502,7 @@ impl ChannelContext { /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction) /// TODO Some magic rust shit to compile-time check this? fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys { - let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx); + let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx); let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint; let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; let counterparty_pubkeys = self.get_counterparty_pubkeys(); @@ -1597,6 +1657,14 @@ impl ChannelContext { let inbound_stats = context.get_inbound_pending_htlc_stats(None); let outbound_stats = context.get_outbound_pending_htlc_stats(None); + let mut balance_msat = context.value_to_self_msat; + for ref htlc in context.pending_inbound_htlcs.iter() { + if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state { + balance_msat += htlc.amount_msat; + } + } + balance_msat -= outbound_stats.pending_htlcs_value_msat; + let outbound_capacity_msat = context.value_to_self_msat .saturating_sub(outbound_stats.pending_htlcs_value_msat) .saturating_sub( @@ -1604,6 +1672,11 @@ impl ChannelContext { let mut available_capacity_msat = outbound_capacity_msat; + let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000 + } else { + 0 + }; if context.is_outbound() { // We should mind channel commit tx fee when computing how much of the available capacity // can be used in the next htlc. Mirrors the logic in send_htlc. @@ -1618,14 +1691,19 @@ impl ChannelContext { } let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered); - let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(())); + let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(())); let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered); - let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * context.next_local_commit_tx_fee_msat(htlc_dust, Some(())); + let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(())); + if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; + min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; + } // We will first subtract the fee as if we were above-dust. Then, if the resulting // value ends up being below dust, we have this fee available again. In that case, // match the value to right-below-dust. 
- let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64); + let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 - + max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64; if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 { let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat; debug_assert!(one_htlc_difference_msat != 0); @@ -1650,7 +1728,7 @@ impl ChannelContext { let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat) .saturating_sub(inbound_stats.pending_htlcs_value_msat); - if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat { + if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat { // If another HTLC's fee would reduce the remote's balance below the reserve limit // we've selected for them, we can only send dust HTLCs. available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1); @@ -1713,6 +1791,7 @@ impl ChannelContext { outbound_capacity_msat, next_outbound_htlc_limit_msat: available_capacity_msat, next_outbound_htlc_minimum_msat, + balance_msat, } } @@ -1915,15 +1994,41 @@ impl ChannelContext { res } - /// Returns transaction if there is pending funding transaction that is yet to broadcast - pub fn unbroadcasted_funding(&self) -> Option { - if self.channel_state & (ChannelState::FundingCreated as u32) != 0 { - self.funding_transaction.clone() + fn if_unbroadcasted_funding(&self, f: F) -> Option + where F: Fn() -> Option { + if self.channel_state & ChannelState::FundingCreated as u32 != 0 || + self.channel_state & ChannelState::WaitingForBatch as u32 != 0 { + f() } else { None } } + /// Returns the transaction if there is a pending funding transaction that is yet to be + /// broadcast. + pub fn unbroadcasted_funding(&self) -> Option { + self.if_unbroadcasted_funding(|| self.funding_transaction.clone()) + } + + /// Returns the transaction ID if there is a pending funding transaction that is yet to be + /// broadcast. + pub fn unbroadcasted_funding_txid(&self) -> Option { + self.if_unbroadcasted_funding(|| + self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid) + ) + } + + /// Returns whether the channel is funded in a batch. + pub fn is_batch_funding(&self) -> bool { + self.is_batch_funding.is_some() + } + + /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be + /// broadcast. + pub fn unbroadcasted_batch_funding_txid(&self) -> Option { + self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding()) + } + /// Gets the latest commitment transaction and any dependent transactions for relay (forcing /// shutdown of this channel - no more calls into this Channel may be made afterwards except /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). 
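The capacity math in the hunk above reserves the value of both anchor outputs out of the funder's balance on anchor channels and only applies the fee-spike buffer multiple on non-anchor channels. The following is a reduced, self-contained sketch of that arithmetic under stated assumptions: the constant values (330 sat per anchor output, a spike-buffer multiple of 2) and the helper name `next_htlc_capacity_msat` are assumed here, not shown in this hunk.

const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330; // assumed value of each anchor output
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2; // assumed spike-buffer multiple

fn next_htlc_capacity_msat(outbound_capacity_msat: u64, commit_tx_fee_msat: u64, anchors: bool) -> i64 {
    // Both anchor outputs come out of the funder's balance on anchor channels, while the
    // fee-spike buffer multiple is only applied on non-anchor channels, mirroring the
    // capacity computation in the hunk above.
    let anchor_outputs_value_msat = if anchors { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000 } else { 0 };
    let max_reserved_commit_tx_fee_msat = if anchors {
        commit_tx_fee_msat
    } else {
        commit_tx_fee_msat * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE
    };
    outbound_capacity_msat as i64
        - max_reserved_commit_tx_fee_msat as i64
        - anchor_outputs_value_msat as i64
}

fn main() {
    // Non-anchor channel: the full spike buffer is reserved against the next commitment fee.
    assert_eq!(next_htlc_capacity_msat(1_000_000, 100_000, false), 800_000);
    // Anchor channel: no spike buffer, but 2 * 330 sat of anchors are reserved instead.
    assert_eq!(next_htlc_capacity_msat(1_000_000, 100_000, true), 240_000);
}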
@@ -1964,10 +2069,15 @@ impl ChannelContext { })) } else { None } } else { None }; + let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid(); self.channel_state = ChannelState::ShutdownComplete as u32; self.update_time_counter += 1; - (monitor_update, dropped_outbound_htlcs) + ShutdownResult { + monitor_update, + dropped_outbound_htlcs, + unbroadcasted_batch_funding_txid, + } } } @@ -2022,21 +2132,16 @@ fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_feature // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs. // Note that num_htlcs should not include dust HTLCs. -fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 { +pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 { // Note that we need to divide before multiplying to round properly, // since the lowest denomination of bitcoin on-chain is the satoshi. (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 } -// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking -// has been completed, and then turn into a Channel to get compiler-time enforcement of things like -// calling channel_id() before we're set up or things like get_funding_signed on an -// inbound channel. -// // Holder designates channel data owned for the benefit of the user client. // Counterparty designates channel data owned by the another channel participant entity. -pub(super) struct Channel { - pub context: ChannelContext, +pub(super) struct Channel where SP::Target: SignerProvider { + pub context: ChannelContext, } #[cfg(any(test, fuzzing))] @@ -2048,7 +2153,10 @@ struct CommitmentTxInfoCached { feerate: u32, } -impl Channel { +impl Channel where + SP::Target: SignerProvider, + ::Signer: WriteableEcdsaChannelSigner +{ fn check_remote_fee( channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator, feerate_per_kw: u32, cur_feerate_per_kw: Option, logger: &L @@ -2061,28 +2169,20 @@ impl Channel { // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a // zero fee, so their fee is no longer considered to determine dust limits. if !channel_type.supports_anchors_zero_fee_htlc_tx() { - let upper_limit = cmp::max(250 * 25, - fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10); + let upper_limit = + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee) as u64; if feerate_per_kw as u64 > upper_limit { return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit))); } } - // We can afford to use a lower bound with anchors than previously since we can now bump - // fees when broadcasting our commitment. However, we must still make sure we meet the - // minimum mempool feerate, until package relay is deployed, such that we can ensure the - // commitment transaction propagates throughout node mempools on its own. 
let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() { - ConfirmationTarget::MempoolMinimum + ConfirmationTarget::MinAllowedAnchorChannelRemoteFee } else { - ConfirmationTarget::Background + ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee }; let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target); - // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing - // occasional issues with feerate disagreements between an initiator that wants a feerate - // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250 - // sat/kw before the comparison here. - if feerate_per_kw + 250 < lower_limit { + if feerate_per_kw < lower_limit { if let Some(cur_feerate) = cur_feerate_per_kw { if feerate_per_kw > cur_feerate { log_warn!(logger, @@ -2091,7 +2191,7 @@ impl Channel { return Ok(()); } } - return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit))); + return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit))); } Ok(()) } @@ -2202,8 +2302,6 @@ impl Channel { } assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()); - // ChannelManager may generate duplicate claims/fails due to HTLC update events from // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop // these, but for now we just have to treat them as normal. @@ -2212,13 +2310,15 @@ impl Channel { let mut htlc_value_msat = 0; for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() { if htlc.htlc_id == htlc_id_arg { - assert_eq!(htlc.payment_hash, payment_hash_calc); + debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner())); + log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}", + htlc.htlc_id, htlc.payment_hash, payment_preimage_arg); match htlc.state { InboundHTLCState::Committed => {}, InboundHTLCState::LocalRemoved(ref reason) => { if let &InboundHTLCRemovalReason::Fulfill(_) = reason { } else { - log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id())); + log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id()); debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); } return UpdateFulfillFetch::DuplicateClaim {}; @@ -2271,7 +2371,7 @@ impl Channel { }, &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id())); + log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id()); // TODO: We may actually be able to switch to a fulfill here, though its // rare enough it may not be worth the complexity burden. debug_assert!(false, "Tried to fulfill an HTLC that was already failed"); @@ -2281,7 +2381,7 @@ impl Channel { _ => {} } } - log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! 
Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state); + log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); @@ -2299,7 +2399,7 @@ impl Channel { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; } - log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); + log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id); htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone())); } @@ -2438,7 +2538,7 @@ impl Channel { _ => {} } } - log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); + log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id()); self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC { htlc_id: htlc_id_arg, err_packet, @@ -2446,7 +2546,7 @@ impl Channel { return Ok(None); } - log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id())); + log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id()); { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone())); @@ -2463,11 +2563,10 @@ impl Channel { /// Handles a funding_signed message from the remote end. /// If this call is successful, broadcast the funding transaction (and not before!) 
- pub fn funding_signed( + pub fn funding_signed( &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result, ChannelError> + ) -> Result::Signer>, ChannelError> where - SP::Target: SignerProvider, L::Target: Logger { if !self.context.is_outbound() { @@ -2490,7 +2589,7 @@ impl Channel { let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; @@ -2512,7 +2611,7 @@ impl Channel { self.context.counterparty_funding_pubkey() ); - self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) + self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; @@ -2531,25 +2630,44 @@ impl Channel { obscure_factor, holder_commitment_tx, best_block, self.context.counterparty_node_id); - channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); + channel_monitor.provide_initial_counterparty_commitment_tx( + counterparty_initial_bitcoin_tx.txid, Vec::new(), + self.context.cur_counterparty_commitment_transaction_number, + self.context.counterparty_cur_commitment_point.unwrap(), + counterparty_initial_commitment_tx.feerate_per_kw(), + counterparty_initial_commitment_tx.to_broadcaster_value_sat(), + counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger); assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update! - self.context.channel_state = ChannelState::FundingSent as u32; + if self.context.is_batch_funding() { + self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32; + } else { + self.context.channel_state = ChannelState::FundingSent as u32; + } self.context.cur_holder_commitment_transaction_number -= 1; self.context.cur_counterparty_commitment_transaction_number -= 1; - log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id())); + log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); let need_channel_ready = self.check_get_channel_ready(0).is_some(); self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); Ok(channel_monitor) } + /// Updates the state of the channel to indicate that all channels in the batch have received + /// funding_signed and persisted their monitors. + /// The funding transaction is consequently allowed to be broadcast, and the channel can be + /// treated as a non-batch channel going forward. 
+ pub fn set_batch_ready(&mut self) { + self.context.is_batch_funding = None; + self.context.channel_state &= !(ChannelState::WaitingForBatch as u32); + } + /// Handles a channel_ready message from our peer. If we've already sent our channel_ready /// and the channel is now usable (and public), this may generate an announcement_signatures to /// reply with. pub fn channel_ready( - &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash, + &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, logger: &L ) -> Result, ChannelError> where @@ -2572,7 +2690,13 @@ impl Channel { let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state == ChannelState::FundingSent as u32 { + // Our channel_ready shouldn't have been sent if we are waiting for other channels in the + // batch, but we can receive channel_ready messages. + debug_assert!( + non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 || + non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0 + ); + if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 { self.context.channel_state |= ChannelState::TheirChannelReady as u32; } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS); @@ -2612,9 +2736,9 @@ impl Channel { self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point; self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); - log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id())); + log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id()); - Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger)) + Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger)) } pub fn update_add_htlc( @@ -2655,6 +2779,7 @@ impl Channel { if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat { return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat))); } + // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet // the reserve_satoshis we told them to always have as direct payment so that they lose // something if we punish them for broadcasting an old state). @@ -2714,40 +2839,50 @@ impl Channel { // Check that the remote can afford to pay for this HTLC on-chain at the current // feerate_per_kw, while maintaining their channel reserve (as required by the spec). 
- let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else { - let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations - }; - if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat { - return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned())); - }; - - if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 { - return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); + { + let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else { + let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); + self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations + }; + let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000 + } else { + 0 + }; + if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat { + return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned())); + }; + if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 { + return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); + } } + let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000 + } else { + 0 + }; if !self.context.is_outbound() { - // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from - // the spec because in the spec, the fee spike buffer requirement doesn't exist on the - // receiver's side, only on the sender's. - // Note that when we eventually remove support for fee updates and switch to anchor output - // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep - // the extra htlc when calculating the next remote commitment transaction fee as we should - // still be able to afford adding this HTLC plus one more future HTLC, regardless of being - // sensitive to fee spikes. + // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from + // the spec because the fee spike buffer requirement doesn't exist on the receiver's + // side, only on the sender's. Note that with anchor outputs we are no longer as + // sensitive to fee spikes, so we need to account for them. 
let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); - let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); - if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat { + let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(())); + if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { + remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; + } + if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat { // Note that if the pending_forward_status is not updated here, then it's because we're already failing // the HTLC, i.e. its status is already set to failing. - log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id())); + log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id()); pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7); } } else { // Check that they won't violate our local required channel reserve by adding this HTLC. let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None); - if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat { + if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat { return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned())); } } @@ -2868,7 +3003,7 @@ impl Channel { log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), - log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); + log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id()); if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) { return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned())); } @@ -2938,7 +3073,7 @@ impl Channel { let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]); log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.", log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()), - encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id())); + 
encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id()); if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())); } @@ -2964,7 +3099,7 @@ impl Channel { self.context.counterparty_funding_pubkey() ); - self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages) + self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages) .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; // Update state now that we've passed all the can-fail calls... @@ -2982,7 +3117,7 @@ impl Channel { } else { None }; if let Some(forward_info) = new_forward { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.", - log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); + &htlc.payment_hash, &self.context.channel_id); htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info); need_commitment = true; } @@ -2991,7 +3126,7 @@ impl Channel { for htlc in self.context.pending_outbound_htlcs.iter_mut() { if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state { log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.", - log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id)); + &htlc.payment_hash, &self.context.channel_id); // Grab the preimage, if it exists, instead of cloning let mut reason = OutboundHTLCOutcome::Success(None); mem::swap(outcome, &mut reason); @@ -3041,7 +3176,7 @@ impl Channel { monitor_update.updates.append(&mut additional_update.updates); } log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", - log_bytes!(self.context.channel_id)); + &self.context.channel_id); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -3058,7 +3193,7 @@ impl Channel { } else { false }; log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", - log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" }); + &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" }); self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -3071,7 +3206,7 @@ impl Channel { ) -> (Option, Vec<(HTLCSource, PaymentHash)>) where F::Target: FeeEstimator, L::Target: Logger { - if self.context.channel_state >= ChannelState::ChannelReady as u32 && + if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 { self.free_holding_cell_htlcs(fee_estimator, logger) } else { (None, Vec::new()) } @@ -3087,7 +3222,7 @@ impl Channel { assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0); if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() { 
log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(), - if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id())); + if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id()); let mut monitor_update = ChannelMonitorUpdate { update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet! @@ -3118,8 +3253,7 @@ impl Channel { Err(e) => { match e { ChannelError::Ignore(ref msg) => { - log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", - log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id())); + log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id()); // If we fail to send here, then this HTLC should // be failed backwards. Failing to send here // indicates that this HTLC may keep being put back @@ -3185,7 +3319,7 @@ impl Channel { monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.", - log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" }, + &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); @@ -3240,10 +3374,14 @@ impl Channel { *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; } - self.context.holder_signer.validate_counterparty_revocation( - self.context.cur_counterparty_commitment_transaction_number + 1, - &secret - ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?; + match &self.context.holder_signer { + ChannelSignerType::Ecdsa(ecdsa) => { + ecdsa.validate_counterparty_revocation( + self.context.cur_counterparty_commitment_transaction_number + 1, + &secret + ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?; + } + }; self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret) .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?; @@ -3270,7 +3408,7 @@ impl Channel { self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived; } - log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id())); + log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id()); let mut to_forward_infos = Vec::new(); let mut revoked_htlcs = Vec::new(); let mut finalized_claimed_htlcs = Vec::new(); @@ -3287,7 +3425,7 @@ impl Channel { // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug) pending_inbound_htlcs.retain(|htlc| { if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state { - log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash); if let &InboundHTLCRemovalReason::Fulfill(_) = reason { value_to_self_msat_diff += htlc.amount_msat as i64; } @@ -3296,7 +3434,7 @@ impl Channel { }); pending_outbound_htlcs.retain(|htlc| { if let 
&OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state { - log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash); if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :( revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason)); } else { @@ -3318,13 +3456,13 @@ impl Channel { mem::swap(&mut state, &mut htlc.state); if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state { - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash); htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info); require_commitment = true; } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state { match forward_info { PendingHTLCStatus::Fail(fail_msg) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash); require_commitment = true; match fail_msg { HTLCFailureMsg::Relay(msg) => { @@ -3338,7 +3476,7 @@ impl Channel { } }, PendingHTLCStatus::Forward(forward_info) => { - log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash); to_forward_infos.push((forward_info, htlc.htlc_id)); htlc.state = InboundHTLCState::Committed; } @@ -3348,11 +3486,11 @@ impl Channel { } for htlc in pending_outbound_htlcs.iter_mut() { if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { - log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash); htlc.state = OutboundHTLCState::Committed; } if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { - log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash); // Grab the preimage, if it exists, instead of cloning let mut reason = OutboundHTLCOutcome::Success(None); mem::swap(outcome, &mut reason); @@ -3415,7 +3553,7 @@ impl Channel { self.context.monitor_pending_forwards.append(&mut to_forward_infos); self.context.monitor_pending_failures.append(&mut revoked_htlcs); self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); - log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id()); return_with_htlcs_to_fail!(Vec::new()); } @@ -3427,7 
+3565,7 @@ impl Channel { monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.", - log_bytes!(self.context.channel_id()), release_state_str); + &self.context.channel_id(), release_state_str); self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); return_with_htlcs_to_fail!(htlcs_to_fail); @@ -3442,7 +3580,7 @@ impl Channel { monitor_update.updates.append(&mut additional_update.updates); log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.", - log_bytes!(self.context.channel_id()), + &self.context.channel_id(), update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), release_state_str); @@ -3450,7 +3588,7 @@ impl Channel { return_with_htlcs_to_fail!(htlcs_to_fail); } else { log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.", - log_bytes!(self.context.channel_id()), release_state_str); + &self.context.channel_id(), release_state_str); self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); return_with_htlcs_to_fail!(htlcs_to_fail); @@ -3542,17 +3680,17 @@ impl Channel { /// resent. /// No further message handling calls may be made until a channel_reestablish dance has /// completed. - pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) where L::Target: Logger { + /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately. + pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger { assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - if self.context.channel_state < ChannelState::FundingSent as u32 { - self.context.channel_state = ChannelState::ShutdownComplete as u32; - return; + if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { + return Err(()); } if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) { // While the below code should be idempotent, it's simpler to just return early, as // redundant disconnect events can fire, though they should be rare. - return; + return Ok(()); } if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed { @@ -3612,7 +3750,8 @@ impl Channel { self.context.sent_message_awaiting_response = None; self.context.channel_state |= ChannelState::PeerDisconnected as u32; - log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id())); + log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id()); + Ok(()) } /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted. @@ -3644,7 +3783,7 @@ impl Channel { /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. 
pub fn monitor_updating_restored( - &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash, + &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32 ) -> MonitorRestoreUpdates where @@ -3658,12 +3797,12 @@ impl Channel { // (re-)broadcast the funding transaction as we may have declined to broadcast it when we // first received the funding_signed. let mut funding_broadcastable = - if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { + if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 { self.context.funding_transaction.take() } else { None }; // That said, if the funding transaction is already confirmed (ie we're active with a // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx. - if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) { + if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) { funding_broadcastable = None; } @@ -3677,7 +3816,7 @@ impl Channel { assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); self.context.monitor_pending_channel_ready = false; - let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); Some(msgs::ChannelReady { channel_id: self.context.channel_id(), next_per_commitment_point, @@ -3685,7 +3824,7 @@ impl Channel { }) } else { None }; - let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger); + let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); let mut accepted_htlcs = Vec::new(); mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); @@ -3715,7 +3854,7 @@ impl Channel { self.context.monitor_pending_commitment_signed = false; let order = self.context.resend_order.clone(); log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first", - log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, + &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" }, if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" }, match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"}); MonitorRestoreUpdates { @@ -3732,7 +3871,7 @@ impl Channel { if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); } - Channel::::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; + 
Channel::::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?; let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None); self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); @@ -3759,8 +3898,8 @@ impl Channel { } fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK { - let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); - let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); + let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2); msgs::RevokeAndACK { channel_id: self.context.channel_id, per_commitment_secret, @@ -3827,7 +3966,7 @@ impl Channel { } else { None }; log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" }, + &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" }, update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); msgs::CommitmentUpdate { update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, @@ -3855,7 +3994,7 @@ impl Channel { /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`]. 
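The repeated `holder_signer.as_ref()` calls above come from wrapping the signer in a `ChannelSignerType`. A simplified model of that indirection (not the exact type in `sign::type_resolver`), showing why shared `ChannelSigner` operations go through `as_ref()` while backend-specific signing needs a match:

```rust
trait CommonSigner {
    fn get_per_commitment_point(&self, commitment_number: u64) -> [u8; 33];
    fn release_commitment_secret(&self, commitment_number: u64) -> [u8; 32];
}

enum SignerType<S: CommonSigner> {
    Ecdsa(S),
    // Future variants (e.g. Taproot) would be added here.
}

impl<S: CommonSigner> SignerType<S> {
    // Shared functionality is reachable without caring which backend is in use.
    fn as_ref(&self) -> &S {
        match self {
            SignerType::Ecdsa(s) => s,
        }
    }
}

fn next_revocation_data<S: CommonSigner>(
    signer: &SignerType<S>, commitment_number: u64,
) -> ([u8; 33], [u8; 32]) {
    // Mirrors get_last_revoke_and_ack: the point for the current commitment
    // number and the secret released for commitment_number + 2.
    (
        signer.as_ref().get_per_commitment_point(commitment_number),
        signer.as_ref().release_commitment_secret(commitment_number + 2),
    )
}
```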
pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock ) -> Result where L::Target: Logger, @@ -3870,11 +4009,11 @@ impl Channel { if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_local_commitment_number == 0 { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned())); + return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned())); } if msg.next_remote_commitment_number > 0 { - let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); + let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx); let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) { @@ -3883,8 +4022,8 @@ impl Channel { if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number { macro_rules! log_and_panic { ($err_msg: expr) => { - log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); - panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id)); + log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id)); + panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id)); } } log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ @@ -3914,7 +4053,7 @@ impl Channel { let shutdown_msg = self.get_outbound_shutdown(); - let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger); + let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger); if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 { // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's. @@ -3933,7 +4072,7 @@ impl Channel { } // We have OurChannelReady set! 
- let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); return Ok(ReestablishResponses { channel_ready: Some(msgs::ChannelReady { channel_id: self.context.channel_id(), @@ -3973,7 +4112,7 @@ impl Channel { let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady - let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); Some(msgs::ChannelReady { channel_id: self.context.channel_id(), next_per_commitment_point, @@ -3983,9 +4122,9 @@ impl Channel { if msg.next_local_commitment_number == next_counterparty_commitment_number { if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id()); } else { - log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id()); } Ok(ReestablishResponses { @@ -3996,9 +4135,9 @@ impl Channel { }) } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 { if required_revoke.is_some() { - log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id()); } else { - log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id()); } if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 { @@ -4033,8 +4172,10 @@ impl Channel { // Propose a range from our current Background feerate to our Normal feerate plus our // force_close_avoidance_max_fee_satoshis. // If we fail to come to consensus, we'll have to force-close. 
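A hedged sketch of the closing-fee range selection described by the comment above, using the new `ChannelCloseMinimum`/`NonAnchorChannelFee` confirmation targets; the feerate values are arbitrary stand-ins for whatever the fee estimator returns:

```rust
#[derive(Clone, Copy)]
enum ConfTarget { ChannelCloseMinimum, NonAnchorChannelFee }

fn feerate_for(target: ConfTarget) -> u32 {
    // Stand-in for fee_estimator.bounded_sat_per_1000_weight(target).
    match target {
        ConfTarget::ChannelCloseMinimum => 253,
        ConfTarget::NonAnchorChannelFee => 2_000,
    }
}

/// Returns the (min, max) feerate, in sat per 1000 weight-units, proposed for
/// the cooperative-close negotiation.
fn closing_feerate_range(is_outbound: bool) -> (u32, u32) {
    let min = feerate_for(ConfTarget::ChannelCloseMinimum);
    let max = if is_outbound {
        // The funder pays the closing fee, so it caps how high it will bid; the
        // close is not expected to need later fee bumping.
        feerate_for(ConfTarget::NonAnchorChannelFee)
    } else {
        // The fundee doesn't pay the fee, so it accepts any feerate the funder proposes.
        u32::MAX
    };
    (min, max)
}
```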
- let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background); - let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum); + // Use NonAnchorChannelFee because this should be an estimate for a channel close + // that we don't expect to need fee bumping + let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() }; // The spec requires that (when the channel does not have anchors) we only send absolute @@ -4096,18 +4237,18 @@ impl Channel { pub fn maybe_propose_closing_signed( &mut self, fee_estimator: &LowerBoundedFeeEstimator, logger: &L) - -> Result<(Option, Option), ChannelError> + -> Result<(Option, Option, Option), ChannelError> where F::Target: FeeEstimator, L::Target: Logger { if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() { - return Ok((None, None)); + return Ok((None, None, None)); } if !self.context.is_outbound() { if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() { return self.closing_signed(fee_estimator, &msg); } - return Ok((None, None)); + return Ok((None, None, None)); } let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator); @@ -4117,20 +4258,24 @@ impl Channel { log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)", our_min_fee, our_max_fee, total_fee_satoshis); - let sig = self.context.holder_signer - .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) - .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; + match &self.context.holder_signer { + ChannelSignerType::Ecdsa(ecdsa) => { + let sig = ecdsa + .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) + .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?; - self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); - Ok((Some(msgs::ClosingSigned { - channel_id: self.context.channel_id, - fee_satoshis: total_fee_satoshis, - signature: sig, - fee_range: Some(msgs::ClosingSignedFeeRange { - min_fee_satoshis: our_min_fee, - max_fee_satoshis: our_max_fee, - }), - }), None)) + self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone())); + Ok((Some(msgs::ClosingSigned { + channel_id: self.context.channel_id, + fee_satoshis: total_fee_satoshis, + signature: sig, + fee_range: Some(msgs::ClosingSignedFeeRange { + min_fee_satoshis: our_min_fee, + max_fee_satoshis: our_max_fee, + }), + }), None, None)) + } + } } // Marks a channel as waiting for a response from the counterparty. 
If it's not received @@ -4155,15 +4300,14 @@ impl Channel { *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS } - pub fn shutdown( + pub fn shutdown( &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown ) -> Result<(Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError> - where SP::Target: SignerProvider { if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); } - if self.context.channel_state < ChannelState::FundingSent as u32 { + if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { // Spec says we should fail the connection, not the channel, but that's nonsense, there // are plenty of reasons you may want to fail a channel pre-funding, and spec says you // can do that via error message without getting a connection fail anyway... @@ -4278,7 +4422,7 @@ impl Channel { pub fn closing_signed( &mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::ClosingSigned) - -> Result<(Option, Option), ChannelError> + -> Result<(Option, Option, Option), ChannelError> where F::Target: FeeEstimator { if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK { @@ -4300,7 +4444,7 @@ impl Channel { if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 { self.context.pending_counterparty_closing_signed = Some(msg.clone()); - return Ok((None, None)); + return Ok((None, None, None)); } let funding_redeemscript = self.context.get_funding_redeemscript(); @@ -4330,10 +4474,15 @@ impl Channel { assert!(self.context.shutdown_scriptpubkey.is_some()); if let Some((last_fee, sig)) = self.context.last_sent_closing_fee { if last_fee == msg.fee_satoshis { + let shutdown_result = ShutdownResult { + monitor_update: None, + dropped_outbound_htlcs: Vec::new(), + unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), + }; let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); self.context.channel_state = ChannelState::ShutdownComplete as u32; self.context.update_time_counter += 1; - return Ok((None, Some(tx))); + return Ok((None, Some(tx), Some(shutdown_result))); } } @@ -4347,27 +4496,37 @@ impl Channel { self.build_closing_transaction($new_fee, false) }; - let sig = self.context.holder_signer - .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; - - let signed_tx = if $new_fee == msg.fee_satoshis { - self.context.channel_state = ChannelState::ShutdownComplete as u32; - self.context.update_time_counter += 1; - let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig); - Some(tx) - } else { None }; - - self.context.last_sent_closing_fee = Some((used_fee, sig.clone())); - return Ok((Some(msgs::ClosingSigned { - channel_id: self.context.channel_id, - fee_satoshis: used_fee, - signature: sig, - fee_range: Some(msgs::ClosingSignedFeeRange { - min_fee_satoshis: our_min_fee, - max_fee_satoshis: our_max_fee, - }), - }), signed_tx)) + return match &self.context.holder_signer { + ChannelSignerType::Ecdsa(ecdsa) => { + let sig = ecdsa + .sign_closing_transaction(&closing_tx, &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; + let (signed_tx, shutdown_result) = if 
$new_fee == msg.fee_satoshis { + let shutdown_result = ShutdownResult { + monitor_update: None, + dropped_outbound_htlcs: Vec::new(), + unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), + }; + self.context.channel_state = ChannelState::ShutdownComplete as u32; + self.context.update_time_counter += 1; + let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig); + (Some(tx), Some(shutdown_result)) + } else { + (None, None) + }; + + self.context.last_sent_closing_fee = Some((used_fee, sig.clone())); + Ok((Some(msgs::ClosingSigned { + channel_id: self.context.channel_id, + fee_satoshis: used_fee, + signature: sig, + fee_range: Some(msgs::ClosingSignedFeeRange { + min_fee_satoshis: our_min_fee, + max_fee_satoshis: our_max_fee, + }), + }), signed_tx, shutdown_result)) + } + } } } @@ -4478,7 +4637,7 @@ impl Channel { } #[cfg(test)] - pub fn get_signer(&self) -> &Signer { + pub fn get_signer(&self) -> &ChannelSignerType<::Signer> { &self.context.holder_signer } @@ -4553,7 +4712,7 @@ impl Channel { pub fn is_awaiting_initial_mon_persist(&self) -> bool { if !self.is_awaiting_monitor_update() { return false; } if self.context.channel_state & - !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) + !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 { // If we're not a 0conf channel, we'll be waiting on a monitor update with only // FundingSent set, though our peer could have sent their channel_ready. @@ -4584,7 +4743,7 @@ impl Channel { /// Returns true if our channel_ready has been sent pub fn is_our_channel_ready(&self) -> bool { - (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32 + (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 } /// Returns true if our peer has either initiated or agreed to shut down the channel. @@ -4633,6 +4792,8 @@ impl Channel { return None; } + // Note that we don't include ChannelState::WaitingForBatch as we don't want to send + // channel_ready until the entire batch is ready. let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 { self.context.channel_state |= ChannelState::OurChannelReady as u32; @@ -4645,7 +4806,7 @@ impl Channel { // We got a reorg but not enough to trigger a force close, just ignore. false } else { - if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 { + if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 { // We should never see a funding transaction on-chain until we've received // funding_signed (if we're an outbound channel), or seen funding_generated (if we're // an inbound channel - before that we have no known funding TXID). 
The fuzzer, @@ -4663,7 +4824,7 @@ impl Channel { if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 { if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 { let next_per_commitment_point = - self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx); + self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx); return Some(msgs::ChannelReady { channel_id: self.context.channel_id, next_per_commitment_point, @@ -4682,12 +4843,13 @@ impl Channel { /// In the second, we simply return an Err indicating we need to be force-closed now. pub fn transactions_confirmed( &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, - genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L + chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L ) -> Result<(Option, Option), ClosureReason> where NS::Target: NodeSigner, L::Target: Logger { + let mut msgs = (None, None); if let Some(funding_txo) = self.context.get_funding_txo() { for &(index_in_block, tx) in txdata.iter() { // Check if the transaction is the expected funding transaction, and if it is, @@ -4711,12 +4873,14 @@ impl Channel { return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() }); } else { if self.context.is_outbound() { - for input in tx.input.iter() { - if input.witness.is_empty() { - // We generated a malleable funding transaction, implying we've - // just exposed ourselves to funds loss to our counterparty. - #[cfg(not(fuzzing))] - panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!"); + if !tx.is_coin_base() { + for input in tx.input.iter() { + if input.witness.is_empty() { + // We generated a malleable funding transaction, implying we've + // just exposed ourselves to funds loss to our counterparty. + #[cfg(not(fuzzing))] + panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!"); + } } } } @@ -4727,25 +4891,32 @@ impl Channel { Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"), } } + // If this is a coinbase transaction and not a 0-conf channel + // we should update our min_depth to 100 to handle coinbase maturity + if tx.is_coin_base() && + self.context.minimum_depth.unwrap_or(0) > 0 && + self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY { + self.context.minimum_depth = Some(COINBASE_MATURITY); + } } // If we allow 1-conf funding, we may need to check for channel_ready here and // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). 
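A small self-contained sketch of the coinbase-maturity rule introduced above: when the confirmed funding transaction is a coinbase and the channel is not 0-conf, the required depth is raised to `COINBASE_MATURITY` (100 blocks), since coinbase outputs cannot be spent before they mature. The helper below only mirrors the guard in the diff; it is not an LDK API.

```rust
const COINBASE_MATURITY: u32 = 100;

fn adjusted_minimum_depth(is_coinbase: bool, minimum_depth: Option<u32>) -> Option<u32> {
    match minimum_depth {
        // 0-conf channels (and channels with no recorded depth requirement) are
        // left untouched, mirroring the `unwrap_or(0) > 0` guard in the diff.
        None | Some(0) => minimum_depth,
        Some(depth) if is_coinbase && depth < COINBASE_MATURITY => Some(COINBASE_MATURITY),
        other => other,
    }
}

#[test]
fn coinbase_funding_waits_for_maturity() {
    assert_eq!(adjusted_minimum_depth(true, Some(3)), Some(COINBASE_MATURITY));
    assert_eq!(adjusted_minimum_depth(true, Some(0)), Some(0));   // 0-conf unchanged
    assert_eq!(adjusted_minimum_depth(false, Some(3)), Some(3));  // normal funding unchanged
}
```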
if let Some(channel_ready) = self.check_get_channel_ready(height) { - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id)); - let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger); - return Ok((Some(channel_ready), announcement_sigs)); + log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); + let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger); + msgs = (Some(channel_ready), announcement_sigs); } } for inp in tx.input.iter() { if inp.previous_output == funding_txo.into_bitcoin_outpoint() { - log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id())); + log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id()); return Err(ClosureReason::CommitmentTxConfirmed); } } } } - Ok((None, None)) + Ok(msgs) } /// When a new block is connected, we check the height of the block against outbound holding @@ -4760,19 +4931,19 @@ impl Channel { /// May return some HTLCs (and their payment_hash) which have timed out and should be failed /// back. pub fn best_block_updated( - &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash, + &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> where NS::Target: NodeSigner, L::Target: Logger { - self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger) + self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger) } fn do_best_block_updated( &mut self, height: u32, highest_header_time: u32, - genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L + chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> where NS::Target: NodeSigner, @@ -4798,15 +4969,15 @@ impl Channel { self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time); if let Some(channel_ready) = self.check_get_channel_ready(height) { - let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { - self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) + let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { + self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; - log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id)); + log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs)); } let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state >= ChannelState::ChannelReady as u32 || + if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 || (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 { let 
mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1; if self.context.funding_tx_confirmation_height == 0 { @@ -4832,15 +5003,15 @@ impl Channel { } } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { - log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id)); + log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id); // If funding_tx_confirmed_in is unset, the channel must not be active - assert!(non_shutdown_state <= ChannelState::ChannelReady as u32); + assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32); assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0); return Err(ClosureReason::FundingTimedOut); } - let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { - self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) + let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { + self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; Ok((None, timed_out_htlcs, announcement_sigs)) } @@ -4857,7 +5028,7 @@ impl Channel { // larger. If we don't know that time has moved forward, we can just set it to the last // time we saw and it will be ignored. let best_time = self.context.update_time_counter; - match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) { + match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) { Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => { assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?"); assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?"); @@ -4887,7 +5058,7 @@ impl Channel { /// /// [`ChannelReady`]: crate::ln::msgs::ChannelReady fn get_channel_announcement( - &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig, + &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, ) -> Result where NS::Target: NodeSigner { if !self.context.config.announced_channel { return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); @@ -4918,7 +5089,7 @@ impl Channel { } fn get_announcement_sigs( - &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig, + &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32, logger: &L ) -> Option where @@ -4942,8 +5113,8 @@ impl Channel { return None; } - log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id())); - let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) { + log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id()); + let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) { Ok(a) => a, Err(e) => { log_trace!(logger, "{:?}", e); @@ -4957,26 +5128,30 @@ impl Channel { }, Ok(v) => v }; - let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) 
{ - Err(_) => { - log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!"); - return None; - }, - Ok(v) => v - }; - let short_channel_id = match self.context.get_short_channel_id() { - Some(scid) => scid, - None => return None, - }; + match &self.context.holder_signer { + ChannelSignerType::Ecdsa(ecdsa) => { + let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) { + Err(_) => { + log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!"); + return None; + }, + Ok(v) => v + }; + let short_channel_id = match self.context.get_short_channel_id() { + Some(scid) => scid, + None => return None, + }; - self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent; + self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent; - Some(msgs::AnnouncementSignatures { - channel_id: self.context.channel_id(), - short_channel_id, - node_signature: our_node_sig, - bitcoin_signature: our_bitcoin_sig, - }) + Some(msgs::AnnouncementSignatures { + channel_id: self.context.channel_id(), + short_channel_id, + node_signature: our_node_sig, + bitcoin_signature: our_bitcoin_sig, + }) + } + } } /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are @@ -4991,15 +5166,19 @@ impl Channel { let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?; - let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) - .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?; - Ok(msgs::ChannelAnnouncement { - node_signature_1: if were_node_one { our_node_sig } else { their_node_sig }, - node_signature_2: if were_node_one { their_node_sig } else { our_node_sig }, - bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig }, - bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig }, - contents: announcement, - }) + match &self.context.holder_signer { + ChannelSignerType::Ecdsa(ecdsa) => { + let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) + .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?; + Ok(msgs::ChannelAnnouncement { + node_signature_1: if were_node_one { our_node_sig } else { their_node_sig }, + node_signature_2: if were_node_one { their_node_sig } else { our_node_sig }, + bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig }, + bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig }, + contents: announcement, + }) + } + } } else { Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string())) } @@ -5009,7 +5188,7 @@ impl Channel { /// channel_announcement message which we can broadcast and storing our counterparty's /// signatures for later reconstruction/rebroadcast of the channel_announcement. 
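An illustrative helper (not an LDK API) for the signature ordering used when the `channel_announcement` is assembled above: `*_signature_1` always belongs to the lexicographically-first node (`were_node_one`), independent of which side is signing locally.

```rust
struct OrderedSigs<S> {
    node_signature_1: S,
    node_signature_2: S,
    bitcoin_signature_1: S,
    bitcoin_signature_2: S,
}

fn order_announcement_sigs<S>(
    were_node_one: bool,
    our_node_sig: S, their_node_sig: S,
    our_bitcoin_sig: S, their_bitcoin_sig: S,
) -> OrderedSigs<S> {
    if were_node_one {
        OrderedSigs {
            node_signature_1: our_node_sig, node_signature_2: their_node_sig,
            bitcoin_signature_1: our_bitcoin_sig, bitcoin_signature_2: their_bitcoin_sig,
        }
    } else {
        OrderedSigs {
            node_signature_1: their_node_sig, node_signature_2: our_node_sig,
            bitcoin_signature_1: their_bitcoin_sig, bitcoin_signature_2: our_bitcoin_sig,
        }
    }
}
```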
pub fn announcement_signatures( - &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, + &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, msg: &msgs::AnnouncementSignatures, user_config: &UserConfig ) -> Result where NS::Target: NodeSigner { let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?; @@ -5039,7 +5218,7 @@ impl Channel { /// Gets a signed channel_announcement for this channel, if we previously received an /// announcement_signatures from our counterparty. pub fn get_signed_channel_announcement( - &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig + &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig ) -> Option where NS::Target: NodeSigner { if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { return None; @@ -5069,10 +5248,10 @@ impl Channel { let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap(); - log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id())); + log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id()); remote_last_secret } else { - log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id())); + log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id()); [0;32] }; self.mark_awaiting_response(); @@ -5188,7 +5367,8 @@ impl Channel { } let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0; - log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat, + log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}", + payment_hash, amount_msat, if force_holding_cell { "into holding cell" } else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" } else { "to peer" }); @@ -5244,13 +5424,13 @@ impl Channel { Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) } else { None }; if let Some(state) = new_state { - log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash); htlc.state = state; } } for htlc in self.context.pending_outbound_htlcs.iter_mut() { if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state { - log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); + log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash); // Grab the preimage, if it exists, instead of cloning let mut 
reason = OutboundHTLCOutcome::Success(None); mem::swap(outcome, &mut reason); @@ -5267,7 +5447,9 @@ impl Channel { } self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst; - let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger); + let (mut htlcs_ref, counterparty_commitment_tx) = + self.build_commitment_no_state_update(logger); + let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid(); let htlcs: Vec<(HTLCOutputInCommitment, Option>)> = htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect(); @@ -5282,17 +5464,23 @@ impl Channel { commitment_txid: counterparty_commitment_txid, htlc_outputs: htlcs.clone(), commitment_number: self.context.cur_counterparty_commitment_transaction_number, - their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap() + their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(), + feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()), + to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()), + to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()), }] }; self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32; monitor_update } - fn build_commitment_no_state_update(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { + fn build_commitment_no_state_update(&self, logger: &L) + -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction) + where L::Target: Logger + { let counterparty_keys = self.context.build_remote_transaction_keys(); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); - let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); + let counterparty_commitment_tx = commitment_stats.tx; #[cfg(any(test, fuzzing))] { @@ -5312,7 +5500,7 @@ impl Channel { } } - (counterparty_commitment_txid, commitment_stats.htlcs_included) + (commitment_stats.htlcs_included, counterparty_commitment_tx) } /// Only fails in case of signer rejection. 
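The hunks above make `build_commitment_no_state_update` return the full `CommitmentTransaction` so that the monitor update can record the feerate and both output values alongside the HTLCs. A reduced model of that payload (field names follow the diff; the struct itself is only illustrative, the real carrier is a `ChannelMonitorUpdateStep` variant):

```rust
struct CounterpartyCommitmentInfo {
    commitment_txid: [u8; 32],
    commitment_number: u64,
    // Newly recorded values, kept as Options for compatibility with monitors
    // serialized before these fields existed.
    feerate_per_kw: Option<u32>,
    to_broadcaster_value_sat: Option<u64>,
    to_countersignatory_value_sat: Option<u64>,
}

fn record_counterparty_commitment(
    txid: [u8; 32], number: u64, feerate_per_kw: u32,
    to_broadcaster_value_sat: u64, to_countersignatory_value_sat: u64,
) -> CounterpartyCommitmentInfo {
    // Values are read off the freshly built commitment transaction rather than
    // being recomputed later by the monitor.
    CounterpartyCommitmentInfo {
        commitment_txid: txid,
        commitment_number: number,
        feerate_per_kw: Some(feerate_per_kw),
        to_broadcaster_value_sat: Some(to_broadcaster_value_sat),
        to_countersignatory_value_sat: Some(to_countersignatory_value_sat),
    }
}
```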
Used for channel_reestablish commitment_signed @@ -5325,40 +5513,45 @@ impl Channel { let counterparty_keys = self.context.build_remote_transaction_keys(); let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); let counterparty_commitment_txid = commitment_stats.tx.trust().txid(); - let (signature, htlc_signatures); - { - let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len()); - for &(ref htlc, _) in commitment_stats.htlcs_included.iter() { - htlcs.push(htlc); - } + match &self.context.holder_signer { + ChannelSignerType::Ecdsa(ecdsa) => { + let (signature, htlc_signatures); - let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?; - signature = res.0; - htlc_signatures = res.1; + { + let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len()); + for &(ref htlc, _) in commitment_stats.htlcs_included.iter() { + htlcs.push(htlc); + } - log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", - encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), - &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), - log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id())); + let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?; + signature = res.0; + htlc_signatures = res.1; + + log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}", + encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction), + &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()), + log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id()); + + for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { + log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", + encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), + encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)), + log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), + log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id()); + } + } - for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) { - log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}", - encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)), - encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)), - 
log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()), - log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id())); + Ok((msgs::CommitmentSigned { + channel_id: self.context.channel_id, + signature, + htlc_signatures, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }, (counterparty_commitment_txid, commitment_stats.htlcs_included))) } } - - Ok((msgs::CommitmentSigned { - channel_id: self.context.channel_id, - signature, - htlc_signatures, - #[cfg(taproot)] - partial_signature_with_nonce: None, - }, (counterparty_commitment_txid, commitment_stats.htlcs_included))) } /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment @@ -5386,17 +5579,20 @@ impl Channel { } } - pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> { - if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 { - return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string())); - } - self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo { + /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually + /// happened. + pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result { + let new_forwarding_info = Some(CounterpartyForwardingInfo { fee_base_msat: msg.contents.fee_base_msat, fee_proportional_millionths: msg.contents.fee_proportional_millionths, cltv_expiry_delta: msg.contents.cltv_expiry_delta }); + let did_change = self.context.counterparty_forwarding_info != new_forwarding_info; + if did_change { + self.context.counterparty_forwarding_info = new_forwarding_info; + } - Ok(()) + Ok(did_change) } /// Begins the shutdown process, getting a message for the remote peer and returning all @@ -5404,10 +5600,10 @@ impl Channel { /// /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no /// [`ChannelMonitorUpdate`] will be returned). - pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures, + pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option, override_shutdown_script: Option) - -> Result<(msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError> - where SP::Target: SignerProvider { + -> Result<(msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>, Option), APIError> + { for htlc in self.context.pending_outbound_htlcs.iter() { if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()}); @@ -5432,7 +5628,7 @@ impl Channel { // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown // script is set, we just force-close and call it a day. let mut chan_closed = false; - if self.context.channel_state < ChannelState::FundingSent as u32 { + if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { chan_closed = true; } @@ -5461,11 +5657,18 @@ impl Channel { // From here on out, we may not fail! 
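The `channel_update` change above makes the handler report whether the counterparty's forwarding parameters actually changed instead of overwriting them unconditionally. A minimal sketch of that contract with simplified stand-in types (not `CounterpartyForwardingInfo` itself):

```rust
#[derive(PartialEq)]
struct ForwardingInfo {
    fee_base_msat: u32,
    fee_proportional_millionths: u32,
    cltv_expiry_delta: u16,
}

struct PeerForwardingState {
    forwarding_info: Option<ForwardingInfo>,
}

impl PeerForwardingState {
    /// Applies the update and returns true only if the stored value changed,
    /// letting callers skip redundant follow-up work.
    fn apply_update(&mut self, new_info: ForwardingInfo) -> bool {
        let new_info = Some(new_info);
        let did_change = self.forwarding_info != new_info;
        if did_change {
            self.forwarding_info = new_info;
        }
        did_change
    }
}
```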
self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; - if self.context.channel_state < ChannelState::FundingSent as u32 { + let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { + let shutdown_result = ShutdownResult { + monitor_update: None, + dropped_outbound_htlcs: Vec::new(), + unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), + }; self.context.channel_state = ChannelState::ShutdownComplete as u32; + Some(shutdown_result) } else { self.context.channel_state |= ChannelState::LocalShutdownSent as u32; - } + None + }; self.context.update_time_counter += 1; let monitor_update = if update_shutdown_script { @@ -5501,7 +5704,7 @@ impl Channel { debug_assert!(!self.is_shutdown() || monitor_update.is_none(), "we can't both complete shutdown and return a monitor update"); - Ok((shutdown, monitor_update, dropped_outbound_htlcs)) + Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result)) } pub fn inflight_htlc_sources(&self) -> impl Iterator { @@ -5518,20 +5721,19 @@ impl Channel { } /// A not-yet-funded outbound (from holder) channel using V1 channel establishment. -pub(super) struct OutboundV1Channel { - pub context: ChannelContext, +pub(super) struct OutboundV1Channel where SP::Target: SignerProvider { + pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, } -impl OutboundV1Channel { - pub fn new( +impl OutboundV1Channel where SP::Target: SignerProvider { + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures, channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64 - ) -> Result, APIError> + ) -> Result, APIError> where ES::Target: EntropySource, - SP::Target: SignerProvider, - F::Target: FeeEstimator, + F::Target: FeeEstimator { let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay; let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id); @@ -5561,16 +5763,16 @@ impl OutboundV1Channel { let channel_type = Self::get_initial_channel_type(&config, their_features); debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config))); - let commitment_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() { - ConfirmationTarget::MempoolMinimum + let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() { + (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000) } else { - ConfirmationTarget::Normal + (ConfirmationTarget::NonAnchorChannelFee, 0) }; let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target); let value_to_self_msat = channel_value_satoshis * 1000 - push_msat; let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type); - if value_to_self_msat < commitment_tx_fee { + if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee { return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) }); } @@ -5595,7 +5797,7 @@ impl OutboundV1Channel { Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), }; - 
let temporary_channel_id = entropy_source.get_secure_random_bytes(); + let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source); Ok(Self { context: ChannelContext { @@ -5620,7 +5822,7 @@ impl OutboundV1Channel { latest_monitor_update_id: 0, - holder_signer, + holder_signer: ChannelSignerType::Ecdsa(holder_signer), shutdown_scriptpubkey, destination_script, @@ -5685,6 +5887,7 @@ impl OutboundV1Channel { channel_type_features: channel_type.clone() }, funding_transaction: None, + is_batch_funding: None, counterparty_cur_commitment_point: None, counterparty_prev_commitment_point: None, @@ -5729,8 +5932,13 @@ impl OutboundV1Channel { fn get_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { let counterparty_keys = self.context.build_remote_transaction_keys(); let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) + match &self.context.holder_signer { + // TODO (taproot|arik): move match into calling method for Taproot + ChannelSignerType::Ecdsa(ecdsa) => { + Ok(ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) + .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) + } + } } /// Updates channel state with knowledge of the funding transaction's txid/index, and generates @@ -5740,8 +5948,8 @@ impl OutboundV1Channel { /// Note that channel_id changes during this call! /// Do NOT broadcast the funding transaction until after a successful funding_signed call! /// If an Err is returned, it is a ChannelError::Close. - pub fn get_funding_created(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) - -> Result<(Channel, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger { + pub fn get_funding_created(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) + -> Result<(Channel, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger { if !self.context.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); } @@ -5755,7 +5963,7 @@ impl OutboundV1Channel { } self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters); let signature = match self.get_funding_created_signature(logger) { Ok(res) => res, @@ -5772,7 +5980,17 @@ impl OutboundV1Channel { self.context.channel_state = ChannelState::FundingCreated as u32; self.context.channel_id = funding_txo.to_channel_id(); + + // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100. + // We can skip this if it is a zero-conf channel. 
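A hedged arithmetic sketch of the funding-affordability check changed above: for anchor channels, the value of both anchor outputs is set aside before confirming the funder can pay the initial commitment fee. The fee figure in the test is a stand-in; the 330-sat anchor value is assumed from `ANCHOR_OUTPUT_VALUE_SATOSHI`.

```rust
const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

fn can_afford_initial_commitment(
    value_to_self_msat: u64, commitment_tx_fee_msat: u64, uses_anchors: bool,
) -> bool {
    let anchor_outputs_value_msat = if uses_anchors {
        ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
    } else {
        0
    };
    // saturating_sub avoids an underflow panic when the anchors alone exceed
    // the funder's balance.
    value_to_self_msat.saturating_sub(anchor_outputs_value_msat) >= commitment_tx_fee_msat
}

#[test]
fn anchors_reduce_what_can_pay_the_fee() {
    // 1,000 sats to self against a 700-sat commitment fee: fine without anchors,
    // unaffordable once 660 sats are reserved for the two anchor outputs.
    assert!(can_afford_initial_commitment(1_000_000, 700_000, false));
    assert!(!can_afford_initial_commitment(1_000_000, 700_000, true));
}
```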
+ if funding_transaction.is_coin_base() && + self.context.minimum_depth.unwrap_or(0) > 0 && + self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY { + self.context.minimum_depth = Some(COINBASE_MATURITY); + } + self.context.funding_transaction = Some(funding_transaction); + self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding); let channel = Channel { context: self.context, @@ -5817,7 +6035,7 @@ impl OutboundV1Channel { /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. pub(crate) fn maybe_handle_error_without_close( - &mut self, chain_hash: BlockHash, fee_estimator: &LowerBoundedFeeEstimator + &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator ) -> Result where F::Target: FeeEstimator @@ -5838,7 +6056,7 @@ impl OutboundV1Channel { // whatever reason. if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { self.context.channel_type.clear_anchors_zero_fee_htlc_tx(); - self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); + self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx()); } else if self.context.channel_type.supports_scid_privacy() { self.context.channel_type.clear_scid_privacy(); @@ -5849,7 +6067,7 @@ impl OutboundV1Channel { Ok(self.get_open_channel(chain_hash)) } - pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { + pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel { if !self.context.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } @@ -5861,7 +6079,7 @@ impl OutboundV1Channel { panic!("Tried to send an open_channel for a channel that has already advanced"); } - let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); let keys = self.context.get_holder_pubkeys(); msgs::OpenChannel { @@ -6024,22 +6242,21 @@ impl OutboundV1Channel { } /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. -pub(super) struct InboundV1Channel { - pub context: ChannelContext, +pub(super) struct InboundV1Channel where SP::Target: SignerProvider { + pub context: ChannelContext, pub unfunded_context: UnfundedChannelContext, } -impl InboundV1Channel { +impl InboundV1Channel where SP::Target: SignerProvider { /// Creates a new channel from a remote sides' request for one. /// Assumes chain_hash has already been checked and corresponds with what we expect! 
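A short illustration of the `is_batch_funding: Option<()>` encoding set above: a unit `Option` acts as a presence flag (presumably so it can be stored as an optional field), and `Some(()).filter(|_| cond)` is the idiom for deriving it from a bool.

```rust
fn batch_flag(is_batch_funding: bool) -> Option<()> {
    Some(()).filter(|_| is_batch_funding)
}

#[test]
fn batch_flag_round_trips_the_bool() {
    assert_eq!(batch_flag(true), Some(()));
    assert_eq!(batch_flag(false), None);
    // Reading it back as a bool:
    assert!(batch_flag(true).is_some());
}
```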
- pub fn new( + pub fn new( fee_estimator: &LowerBoundedFeeEstimator, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures, their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig, current_chain_height: u32, logger: &L, is_0conf: bool, - ) -> Result, ChannelError> + ) -> Result, ChannelError> where ES::Target: EntropySource, - SP::Target: SignerProvider, F::Target: FeeEstimator, L::Target: Logger, { @@ -6109,7 +6326,7 @@ impl InboundV1Channel { if msg.htlc_minimum_msat >= full_channel_value_msat { return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat))); } - Channel::::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?; + Channel::::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?; let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); if msg.to_self_delay > max_counterparty_selected_contest_delay { @@ -6172,13 +6389,18 @@ impl InboundV1Channel { // check if the funder's amount for the initial commitment tx is sufficient // for full fee payment plus a few HTLCs to ensure the channel will be useful. + let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 + } else { + 0 + }; let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat; let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000; - if funders_amount_msat / 1000 < commitment_tx_fee { - return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee))); + if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee { + return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee))); } - let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee; + let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value; // While it's reasonable for us to not meet the channel reserve initially (if they don't // want to push much to us), our counterparty should always have more than our reserve. 
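A worked sketch of the acceptance-side check discussed above: after paying the initial commitment fee and, for anchor channels, funding both anchor outputs, the funder's remaining balance must still meet the reserve we selected for them. The numbers in the test are illustrative only, and the helper is not an LDK function.

```rust
fn funder_meets_our_reserve(
    funding_satoshis: u64, push_msat: u64, commitment_tx_fee_sat: u64,
    anchor_outputs_value_sat: u64, holder_selected_channel_reserve_satoshis: u64,
) -> bool {
    let funders_amount_msat = funding_satoshis * 1000 - push_msat;
    let funders_amount_sat = funders_amount_msat / 1000;
    // Mirrors the saturating fee check in the diff before the reserve test.
    if funders_amount_sat.saturating_sub(anchor_outputs_value_sat) < commitment_tx_fee_sat {
        return false;
    }
    let to_remote_satoshis =
        funders_amount_sat - commitment_tx_fee_sat - anchor_outputs_value_sat;
    to_remote_satoshis >= holder_selected_channel_reserve_satoshis
}

#[test]
fn reserve_check_examples() {
    // 100k-sat channel, nothing pushed, 1,100-sat commitment fee, no anchors,
    // 1,000-sat reserve: 100_000 - 1_100 = 98_900 >= 1_000, so accepted.
    assert!(funder_meets_our_reserve(100_000, 0, 1_100, 0, 1_000));
    // Same channel but the funder pushes almost everything to us: rejected.
    assert!(!funder_meets_our_reserve(100_000, 99_500_000, 1_100, 0, 1_000));
}
```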
if to_remote_satoshis < holder_selected_channel_reserve_satoshis { @@ -6254,7 +6476,7 @@ impl InboundV1Channel { latest_monitor_update_id: 0, - holder_signer, + holder_signer: ChannelSignerType::Ecdsa(holder_signer), shutdown_scriptpubkey, destination_script, @@ -6323,6 +6545,7 @@ impl InboundV1Channel { channel_type_features: channel_type.clone() }, funding_transaction: None, + is_batch_funding: None, counterparty_cur_commitment_point: Some(msg.first_per_commitment_point), counterparty_prev_commitment_point: None, @@ -6389,7 +6612,7 @@ impl InboundV1Channel { /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel fn generate_accept_channel_message(&self) -> msgs::AcceptChannel { - let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); + let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); let keys = self.context.get_holder_pubkeys(); msgs::AcceptChannel { @@ -6426,7 +6649,7 @@ impl InboundV1Channel { self.generate_accept_channel_message() } - fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { + fn funding_created_signature(&mut self, sig: &Signature, logger: &L) -> Result<(CommitmentTransaction, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger { let funding_script = self.context.get_funding_redeemscript(); let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); @@ -6439,7 +6662,7 @@ impl InboundV1Channel { log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), - encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id())); + encode::serialize_hex(&funding_script), &self.context.channel_id()); secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned()); } @@ -6449,20 +6672,24 @@ impl InboundV1Channel { let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) - .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; + match &self.context.holder_signer { + // TODO (arik): move match into calling method for Taproot + ChannelSignerType::Ecdsa(ecdsa) => { + let counterparty_signature = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx) + .map_err(|_| 
ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0; - // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. - Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature)) + // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish. + Ok((counterparty_initial_commitment_tx, initial_commitment_tx, counterparty_signature)) + } + } } - pub fn funding_created( + pub fn funding_created( mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(Channel, msgs::FundingSigned, ChannelMonitor), (Self, ChannelError)> + ) -> Result<(Channel, msgs::FundingSigned, ChannelMonitor<::Signer>), (Self, ChannelError)> where - SP::Target: SignerProvider, L::Target: Logger { if self.context.is_outbound() { @@ -6484,9 +6711,9 @@ impl InboundV1Channel { self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo); // This is an externally observable change before we finish all our checks. In particular // funding_created_signature may fail. - self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters); - let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) { + let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) { Ok(res) => res, Err(ChannelError::Close(e)) => { self.context.channel_transaction_parameters.funding_outpoint = None; @@ -6507,7 +6734,7 @@ impl InboundV1Channel { self.context.counterparty_funding_pubkey() ); - if let Err(_) = self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new()) { + if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) { return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned()))); } @@ -6527,14 +6754,19 @@ impl InboundV1Channel { obscure_factor, holder_commitment_tx, best_block, self.context.counterparty_node_id); - channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger); + channel_monitor.provide_initial_counterparty_commitment_tx( + counterparty_initial_commitment_tx.trust().txid(), Vec::new(), + self.context.cur_counterparty_commitment_transaction_number, + self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw, + counterparty_initial_commitment_tx.to_broadcaster_value_sat(), + counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger); self.context.channel_state = ChannelState::FundingSent as u32; self.context.channel_id = funding_txo.to_channel_id(); self.context.cur_counterparty_commitment_transaction_number -= 1; self.context.cur_holder_commitment_transaction_number -= 1; - log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id())); + log_info!(logger, "Generated funding_signed for peer for channel {}", &self.context.channel_id()); // Promote the channel to a full-fledged one now that we have updated the state and have a // `ChannelMonitor`. 
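
// Sketch of the derivation behind the `funding_txo.to_channel_id()` call above, per BOLT 2: the
// channel_id is the 32 funding-txid bytes with the funding output index XORed into the last two
// bytes. This is a simplified helper for illustration (byte order follows the txid bytes as they
// appear in the funding_created message), not LDK API.
fn channel_id(funding_txid_bytes: [u8; 32], funding_output_index: u16) -> [u8; 32] {
	let mut id = funding_txid_bytes;
	id[30] ^= (funding_output_index >> 8) as u8;
	id[31] ^= (funding_output_index & 0xff) as u8;
	id
}

fn main() {
	let txid = [0xab; 32];
	let id = channel_id(txid, 1);
	assert_eq!(&id[..30], &txid[..30]); // only the last two bytes can differ
	assert_eq!(id[30], 0xab);
	assert_eq!(id[31], 0xab ^ 0x01);
}
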
@@ -6611,7 +6843,7 @@ impl Readable for AnnouncementSigsState { } } -impl Writeable for Channel { +impl Writeable for Channel where SP::Target: SignerProvider { fn write(&self, writer: &mut W) -> Result<(), io::Error> { // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been // called. @@ -6636,7 +6868,8 @@ impl Writeable for Channel { self.context.latest_monitor_update_id.write(writer)?; let mut key_data = VecWriter(Vec::new()); - self.context.holder_signer.write(&mut key_data)?; + // TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers. + self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?; assert!(key_data.0.len() < core::usize::MAX); assert!(key_data.0.len() < core::u32::MAX as usize); (key_data.0.len() as u32).write(writer)?; @@ -6928,6 +7161,7 @@ impl Writeable for Channel { (31, channel_pending_event_emitted, option), (35, pending_outbound_skimmed_fees, optional_vec), (37, holding_cell_skimmed_fees, optional_vec), + (38, self.context.is_batch_funding, option), }); Ok(()) @@ -6935,7 +7169,7 @@ impl Writeable for Channel { } const MAX_ALLOC_SIZE: usize = 64*1024; -impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<::Signer> +impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel where ES::Target: EntropySource, SP::Target: SignerProvider @@ -7150,7 +7384,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch }; let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?; - let funding_transaction = Readable::read(reader)?; + let funding_transaction: Option = Readable::read(reader)?; let counterparty_cur_commitment_point = Readable::read(reader)?; @@ -7203,7 +7437,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut user_id_high_opt: Option = None; let mut channel_keys_id: Option<[u8; 32]> = None; - let mut temporary_channel_id: Option<[u8; 32]> = None; + let mut temporary_channel_id: Option = None; let mut holder_max_accepted_htlcs: Option = None; let mut blocked_monitor_updates = Some(Vec::new()); @@ -7211,6 +7445,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut pending_outbound_skimmed_fees_opt: Option>> = None; let mut holding_cell_skimmed_fees_opt: Option>> = None; + let mut is_batch_funding: Option<()> = None; + read_tlv_fields!(reader, { (0, announcement_sigs, option), (1, minimum_depth, option), @@ -7236,6 +7472,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (31, channel_pending_event_emitted, option), (35, pending_outbound_skimmed_fees_opt, optional_vec), (37, holding_cell_skimmed_fees_opt, optional_vec), + (38, is_batch_funding, option), }); let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id { @@ -7243,7 +7480,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch // If we've gotten to the funding stage of the channel, populate the signer with its // required channel parameters. 
let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state >= (ChannelState::FundingCreated as u32) { + if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) { holder_signer.provide_channel_parameters(&channel_parameters); } (channel_keys_id, holder_signer) @@ -7334,7 +7571,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch latest_monitor_update_id, - holder_signer, + holder_signer: ChannelSignerType::Ecdsa(holder_signer), shutdown_scriptpubkey, destination_script, @@ -7393,6 +7630,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch channel_transaction_parameters: channel_parameters, funding_transaction, + is_batch_funding, counterparty_cur_commitment_point, counterparty_prev_commitment_point, @@ -7437,16 +7675,16 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch #[cfg(test)] mod tests { use std::cmp; + use bitcoin::blockdata::constants::ChainHash; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::transaction::{Transaction, TxOut}; - use bitcoin::blockdata::constants::genesis_block; use bitcoin::blockdata::opcodes; use bitcoin::network::constants::Network; use hex; use crate::ln::PaymentHash; use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; use crate::ln::channel::InitFeatures; - use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; + use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS}; use crate::ln::features::ChannelTypeFeatures; use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT}; @@ -7459,10 +7697,9 @@ mod tests { use crate::chain::transaction::OutPoint; use crate::routing::router::Path; use crate::util::config::UserConfig; - use crate::util::enforcing_trait_impls::EnforcingSigner; use crate::util::errors::APIError; use crate::util::test_utils; - use crate::util::test_utils::OnGetShutdownScriptpubkey; + use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface}; use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature}; use bitcoin::secp256k1::ffi::Signature as FFISignature; use bitcoin::secp256k1::{SecretKey,PublicKey}; @@ -7495,7 +7732,7 @@ mod tests { // arithmetic, causing a panic with debug assertions enabled. 
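
// Illustrative sketch (bit values assumed, not LDK's exact ones) of why the
// `non_shutdown_state & !STATE_FLAGS` masking added a bit earlier is needed: channel_state packs
// a funding "stage" value in the low bits and boolean flags (such as the new WaitingForBatch)
// in the high bits, so the stage can only be compared against after the flag bits are masked off.
const FUNDING_SENT: u32 = 8;                 // stage value, assumed for illustration
const WAITING_FOR_BATCH: u32 = 1 << 13;      // the flag added in this diff
const STATE_FLAGS: u32 = WAITING_FOR_BATCH;  // abbreviated; the real mask ORs in several more flags

fn main() {
	let channel_state = FUNDING_SENT | WAITING_FOR_BATCH;
	// A bare comparison no longer recognises the funding stage once a flag bit is set...
	assert_ne!(channel_state, FUNDING_SENT);
	// ...while masking the flag bits first recovers it.
	assert_eq!(channel_state & !STATE_FLAGS, FUNDING_SENT);
}
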
let fee_est = TestFeeEstimator { fee_est: 42 }; let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est); - assert!(Channel::::check_remote_fee( + assert!(Channel::<&TestKeysInterface>::check_remote_fee( &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator, u32::max_value(), None, &&test_utils::TestLogger::new()).is_err()); } @@ -7556,7 +7793,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - match OutboundV1Channel::::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) { + match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) { Err(APIError::IncompatibleShutdownScript { script }) => { assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner()); }, @@ -7579,12 +7816,12 @@ mod tests { let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = OutboundV1Channel::::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee. fee_est.fee_est = 500; - let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); assert_eq!(open_channel_msg.feerate_per_kw, original_fee); } @@ -7606,13 +7843,13 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. 
- let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); + let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); // Node B --> Node A: accept channel, explicitly setting B's dust limit. let mut accept_channel_msg = node_b_chan.accept_inbound_channel(); @@ -7626,7 +7863,7 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed @@ -7687,7 +7924,7 @@ mod tests { let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut chan = OutboundV1Channel::::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type()); let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type()); @@ -7728,7 +7965,7 @@ mod tests { let seed = [42; 32]; let network = Network::Testnet; let best_block = BestBlock::from_network(network); - let chain_hash = best_block.block_hash(); + let chain_hash = ChainHash::using_genesis_block(network); let keys_provider = test_utils::TestKeysInterface::new(&seed, network); // Go through the flow of opening a channel between two nodes. 
@@ -7736,12 +7973,12 @@ mod tests { // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message let open_channel_msg = node_a_chan.get_open_channel(chain_hash); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); + let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); // Node B --> Node A: accept channel let accept_channel_msg = node_b_chan.accept_inbound_channel(); @@ -7753,7 +7990,7 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed @@ -7761,7 +7998,7 @@ mod tests { // Now disconnect the two nodes and check that the commitment point in // Node B's channel_reestablish message is sane. - node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger); + assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); let msg = node_b_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number @@ -7769,7 +8006,7 @@ mod tests { // Check that the commitment point in Node A's channel_reestablish message // is sane. 
- node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger); + assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); let msg = node_a_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number @@ -7799,52 +8036,52 @@ mod tests { // Test that `OutboundV1Channel::new` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound + 1 (2%) of the `channel_value`. - let chan_1 = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); + let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap(); let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000; assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). - let chan_2 = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); + let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap(); let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000; assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); - let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash()); + let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network)); // Test that `InboundV1Channel::new` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, // which is set to the lower bound - 1 (2%) of the `channel_value`. - let chan_3 = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000; assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64); // Test with the upper bound - 1 of valid values (99%). 
- let chan_4 = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000; assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64); // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. - let chan_5 = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); + let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap(); let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000; assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64); // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. - let chan_6 = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); + let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap(); let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000; assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat); // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%) // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1. 
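
// Sketch of the cap rule this test exercises: the configured
// `max_inbound_htlc_value_in_flight_percent_of_channel` is clamped to the [1, 100] range and
// then applied to the channel value (in msat). Hypothetical helper and example numbers for
// illustration only, not LDK's internal function.
fn holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, configured_percent: u8) -> u64 {
	let percent = configured_percent.clamp(1, 100) as u64;
	channel_value_satoshis * 1000 * percent / 100
}

fn main() {
	let channel_value_sat = 100_000;
	assert_eq!(holder_max_htlc_value_in_flight_msat(channel_value_sat, 0), 1_000_000);     // floored at 1%
	assert_eq!(holder_max_htlc_value_in_flight_msat(channel_value_sat, 2), 2_000_000);     // 2%
	assert_eq!(holder_max_htlc_value_in_flight_msat(channel_value_sat, 101), 100_000_000); // capped at 100%
}
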
- let chan_7 = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000; assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64); // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value // than 100. - let chan_8 = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap(); let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000; assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat); } @@ -7884,17 +8121,17 @@ mod tests { let mut outbound_node_config = UserConfig::default(); outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32; - let chan = OutboundV1Channel::::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); + let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap(); let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); - let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash()); + let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network)); let mut inbound_node_config = UserConfig::default(); inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32; if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 { - let chan_inbound_node = InboundV1Channel::::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, 
&channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap(); + let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap(); let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64); @@ -7902,7 +8139,7 @@ mod tests { assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve); } else { // Channel Negotiations failed - let result = InboundV1Channel::::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false); + let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false); assert!(result.is_err()); } } @@ -7915,19 +8152,19 @@ mod tests { let seed = [42; 32]; let network = Network::Testnet; let best_block = BestBlock::from_network(network); - let chain_hash = genesis_block(network).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(network); let keys_provider = test_utils::TestKeysInterface::new(&seed, network); // Create Node A's channel pointing to Node B's pubkey let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let mut node_a_chan = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. 
- let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let mut node_b_chan = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); + let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); // Node B --> Node A: accept channel, explicitly setting B's dust limit. let mut accept_channel_msg = node_b_chan.accept_inbound_channel(); @@ -7941,7 +8178,7 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed @@ -7963,7 +8200,7 @@ mod tests { }, signature: Signature::from(unsafe { FFISignature::new() }) }; - node_a_chan.channel_update(&update).unwrap(); + assert!(node_a_chan.channel_update(&update).unwrap()); // The counterparty can send an update with a higher minimum HTLC, but that shouldn't // change our official htlc_minimum_msat. 
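
// Judging by the assert just above and the negated one further below, `channel_update` now
// reports whether the counterparty's forwarding info actually changed, letting callers skip
// follow-up work for repeated, identical updates. A simplified model of that idempotent-update
// pattern (struct and field names assumed, not LDK's):
#[derive(Clone, PartialEq)]
struct ForwardingInfo { fee_base_msat: u32, fee_proportional_millionths: u32, cltv_expiry_delta: u16 }

struct CounterpartyInfo { forwarding_info: Option<ForwardingInfo> }

impl CounterpartyInfo {
	fn channel_update(&mut self, new_info: ForwardingInfo) -> bool {
		let changed = self.forwarding_info.as_ref() != Some(&new_info);
		self.forwarding_info = Some(new_info);
		changed
	}
}

fn main() {
	let mut counterparty = CounterpartyInfo { forwarding_info: None };
	let update = ForwardingInfo { fee_base_msat: 110, fee_proportional_millionths: 11, cltv_expiry_delta: 100 };
	assert!(counterparty.channel_update(update.clone())); // first application changes state
	assert!(!counterparty.channel_update(update));        // re-applying the same update is a no-op
}
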
@@ -7976,6 +8213,8 @@ mod tests { }, None => panic!("expected counterparty forwarding info to be Some") } + + assert!(!node_a_chan.channel_update(&update).unwrap()); } #[cfg(feature = "_test_vectors")] @@ -7987,7 +8226,7 @@ mod tests { use bitcoin::hashes::hex::FromHex; use bitcoin::hash_types::Txid; use bitcoin::secp256k1::Message; - use crate::sign::EcdsaChannelSigner; + use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner}; use crate::ln::PaymentPreimage; use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys}; use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; @@ -8021,7 +8260,7 @@ mod tests { let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let mut config = UserConfig::default(); config.channel_handshake_config.announced_channel = false; - let mut chan = OutboundV1Channel::::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test + let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test chan.context.holder_dust_limit_satoshis = 546; chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel @@ -8054,10 +8293,10 @@ mod tests { // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not // derived from a commitment_seed, so instead we copy it here and call // build_commitment_transaction. - let delayed_payment_base = &chan.context.holder_signer.pubkeys().delayed_payment_basepoint; + let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint; let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap(); let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret); - let htlc_basepoint = &chan.context.holder_signer.pubkeys().htlc_basepoint; + let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint; let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint); macro_rules! 
test_commitment { @@ -8109,10 +8348,10 @@ mod tests { commitment_tx.clone(), counterparty_signature, counterparty_htlc_sigs, - &chan.context.holder_signer.pubkeys().funding_pubkey, + &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey, chan.context.counterparty_funding_pubkey() ); - let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap(); + let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap(); assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig"); let funding_redeemscript = chan.context.get_funding_redeemscript(); @@ -8120,14 +8359,14 @@ mod tests { assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx"); // ((htlc, counterparty_sig), (index, holder_sig)) - let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate()); + let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter(); $({ log_trace!(logger, "verifying htlc {}", $htlc_idx); let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap(); let ref htlc = htlcs[$htlc_idx]; - let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw, + let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw, chan.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key); let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys); @@ -8147,20 +8386,32 @@ mod tests { assert!(preimage.is_some()); } - let htlc_sig = htlc_sig_iter.next().unwrap(); + let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap(); + let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor { + channel_derivation_parameters: ChannelDerivationParameters { + value_satoshis: chan.context.channel_value_satoshis, + keys_id: chan.context.channel_keys_id, + transaction_parameters: chan.context.channel_transaction_parameters.clone(), + }, + commitment_txid: trusted_tx.txid(), + per_commitment_number: trusted_tx.commitment_number(), + per_commitment_point: trusted_tx.per_commitment_point(), + feerate_per_kw: trusted_tx.feerate_per_kw(), + htlc: htlc.clone(), + preimage: preimage.clone(), + counterparty_sig: *htlc_counterparty_sig, + }, &secp_ctx).unwrap(); let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 }; - assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx + num_anchors), "output index"); + assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index"); let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap(); - assert_eq!(signature, *(htlc_sig.1).1, "htlc sig"); - let index = (htlc_sig.1).0; - let channel_parameters = chan.context.channel_transaction_parameters.as_holder_broadcastable(); + assert_eq!(signature, htlc_holder_sig, "htlc sig"); let trusted_tx = holder_commitment_tx.trust(); - log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage)))); - assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..], - hex::decode($htlc_tx_hex).unwrap()[..], "htlc 
tx"); + htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage); + log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx))); + assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx"); })* - assert!(htlc_sig_iter.next().is_none()); + assert!(htlc_counterparty_sig_iter.next().is_none()); } } } @@ -8754,16 +9005,16 @@ mod tests { let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let config = UserConfig::default(); - let node_a_chan = OutboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, + let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap(); let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); channel_type_features.set_zero_conf_required(); - let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); open_channel_msg.channel_type = Some(channel_type_features); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); - let res = InboundV1Channel::::new(&feeest, &&keys_provider, &&keys_provider, + let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false); assert!(res.is_ok()); @@ -8787,7 +9038,7 @@ mod tests { // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both // need to signal it. 
- let channel_a = OutboundV1Channel::::new( + let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42, &config, 0, 42 @@ -8798,13 +9049,13 @@ mod tests { expected_channel_type.set_static_remote_key_required(); expected_channel_type.set_anchors_zero_fee_htlc_tx_required(); - let channel_a = OutboundV1Channel::::new( + let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); - let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); - let channel_b = InboundV1Channel::::new( + let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); + let channel_b = InboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false @@ -8835,18 +9086,18 @@ mod tests { let raw_init_features = static_remote_key_required | simple_anchors_required; let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec()); - let channel_a = OutboundV1Channel::::new( + let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); // Set `channel_type` to `None` to force the implicit feature negotiation. - let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); + let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); open_channel_msg.channel_type = None; // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts // `static_remote_key`, it will fail the channel. - let channel_b = InboundV1Channel::::new( + let channel_b = InboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors, &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false @@ -8881,15 +9132,15 @@ mod tests { // First, we'll try to open a channel between A and B where A requests a channel type for // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by // B as it's not supported by LDK. 
- let channel_a = OutboundV1Channel::::new( + let channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); - let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); + let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone()); - let res = InboundV1Channel::::new( + let res = InboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &simple_anchors_init, &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false @@ -8900,14 +9151,14 @@ mod tests { // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the // original `option_anchors` feature, which should be rejected by A as it's not supported by // LDK. - let mut channel_a = OutboundV1Channel::::new( + let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init, 10000000, 100000, 42, &config, 0, 42 ).unwrap(); - let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); - let channel_b = InboundV1Channel::::new( + let channel_b = InboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false @@ -8921,4 +9172,146 @@ mod tests { ); assert!(res.is_err()); } + + #[test] + fn test_waiting_for_batch() { + let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000}); + let logger = test_utils::TestLogger::new(); + let secp_ctx = Secp256k1::new(); + let seed = [42; 32]; + let network = Network::Testnet; + let best_block = BestBlock::from_network(network); + let chain_hash = ChainHash::using_genesis_block(network); + let keys_provider = test_utils::TestKeysInterface::new(&seed, network); + + let mut config = UserConfig::default(); + // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a + // channel in a batch before all channels are ready. + config.channel_handshake_limits.trust_own_funding_0conf = true; + + // Create a channel from node a to node b that will be part of batch funding. 
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new( + &feeest, + &&keys_provider, + &&keys_provider, + node_b_node_id, + &channelmanager::provided_init_features(&config), + 10000000, + 100000, + 42, + &config, + 0, + 42, + ).unwrap(); + + let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); + let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); + let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new( + &feeest, + &&keys_provider, + &&keys_provider, + node_b_node_id, + &channelmanager::provided_channel_type_features(&config), + &channelmanager::provided_init_features(&config), + &open_channel_msg, + 7, + &config, + 0, + &&logger, + true, // Allow node b to send a 0conf channel_ready. + ).unwrap(); + + let accept_channel_msg = node_b_chan.accept_inbound_channel(); + node_a_chan.accept_channel( + &accept_channel_msg, + &config.channel_handshake_limits, + &channelmanager::provided_init_features(&config), + ).unwrap(); + + // Fund the channel with a batch funding transaction. + let output_script = node_a_chan.context.get_funding_redeemscript(); + let tx = Transaction { + version: 1, + lock_time: PackedLockTime::ZERO, + input: Vec::new(), + output: vec![ + TxOut { + value: 10000000, script_pubkey: output_script.clone(), + }, + TxOut { + value: 10000000, script_pubkey: Builder::new().into_script(), + }, + ]}; + let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created( + tx.clone(), + funding_outpoint, + true, + &&logger, + ).map_err(|_| ()).unwrap(); + let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created( + &funding_created_msg, + best_block, + &&keys_provider, + &&logger, + ).map_err(|_| ()).unwrap(); + let node_b_updates = node_b_chan.monitor_updating_restored( + &&logger, + &&keys_provider, + chain_hash, + &config, + 0, + ); + + // Receive funding_signed, but the channel will be configured to hold sending channel_ready and + // broadcasting the funding transaction until the batch is ready. + let _ = node_a_chan.funding_signed( + &funding_signed_msg, + best_block, + &&keys_provider, + &&logger, + ).unwrap(); + let node_a_updates = node_a_chan.monitor_updating_restored( + &&logger, + &&keys_provider, + chain_hash, + &config, + 0, + ); + // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set, + // as the funding transaction depends on all channels in the batch becoming ready. + assert!(node_a_updates.channel_ready.is_none()); + assert!(node_a_updates.funding_broadcastable.is_none()); + assert_eq!( + node_a_chan.context.channel_state, + ChannelState::FundingSent as u32 | + ChannelState::WaitingForBatch as u32, + ); + + // It is possible to receive a 0conf channel_ready from the remote node. + node_a_chan.channel_ready( + &node_b_updates.channel_ready.unwrap(), + &&keys_provider, + chain_hash, + &config, + &best_block, + &&logger, + ).unwrap(); + assert_eq!( + node_a_chan.context.channel_state, + ChannelState::FundingSent as u32 | + ChannelState::WaitingForBatch as u32 | + ChannelState::TheirChannelReady as u32, + ); + + // Clear the ChannelState::WaitingForBatch only when called by ChannelManager. 
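
// Sketch of the transition the assertions below check: `set_batch_ready()` simply clears the
// WaitingForBatch bit, leaving every other state bit (here TheirChannelReady) intact. The
// FundingSent and TheirChannelReady values are assumed for illustration; only WaitingForBatch's
// bit position comes from this diff.
const FUNDING_SENT: u32 = 8;
const THEIR_CHANNEL_READY: u32 = 1 << 4;
const WAITING_FOR_BATCH: u32 = 1 << 13;

fn set_batch_ready(channel_state: u32) -> u32 {
	channel_state & !WAITING_FOR_BATCH
}

fn main() {
	let before = FUNDING_SENT | WAITING_FOR_BATCH | THEIR_CHANNEL_READY;
	assert_eq!(set_batch_ready(before), FUNDING_SENT | THEIR_CHANNEL_READY);
}
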
+		node_a_chan.set_batch_ready();
+		assert_eq!(
+			node_a_chan.context.channel_state,
+			ChannelState::FundingSent as u32 |
+			ChannelState::TheirChannelReady as u32,
+		);
+		assert!(node_a_chan.check_get_channel_ready(0).is_some());
+	}
 }