X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=b9a40df91a0117ceb2ff6c9f0f80b16d6fe109c6;hb=62fd36d795f99887260aa962a0e8f30e3b5504fa;hp=da1364021cefe99e9f1b63e2ecd215d8f67b438e;hpb=2c4f82478e83ccfc8948b24d7cb14ef9913e011f;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index da136402..b9a40df9 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -7,6 +7,7 @@
 // You may not use this file except in accordance with one or both of these
 // licenses.
 
+use bitcoin::blockdata::constants::ChainHash;
 use bitcoin::blockdata::script::{Script,Builder};
 use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
 use bitcoin::util::sighash;
@@ -66,6 +67,8 @@ pub struct ChannelValueStat {
 }
 
 pub struct AvailableBalances {
+	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
+	pub balance_msat: u64,
 	/// Total amount available for our counterparty to send to us.
 	pub inbound_capacity_msat: u64,
 	/// Total amount available for us to send to our counterparty.
@@ -300,9 +303,24 @@ enum ChannelState {
 	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
 	/// to drop us, but we store this anyway.
 	ShutdownComplete = 4096,
+	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
+	/// broadcasting of the funding transaction is being held until all channels in the batch
+	/// have received funding_signed and have their monitors persisted.
+	WaitingForBatch = 1 << 13,
 }
-const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
-const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32;
+const BOTH_SIDES_SHUTDOWN_MASK: u32 =
+	ChannelState::LocalShutdownSent as u32 |
+	ChannelState::RemoteShutdownSent as u32;
+const MULTI_STATE_FLAGS: u32 =
+	BOTH_SIDES_SHUTDOWN_MASK |
+	ChannelState::PeerDisconnected as u32 |
+	ChannelState::MonitorUpdateInProgress as u32;
+const STATE_FLAGS: u32 =
+	MULTI_STATE_FLAGS |
+	ChannelState::TheirChannelReady as u32 |
+	ChannelState::OurChannelReady as u32 |
+	ChannelState::AwaitingRemoteRevoke as u32 |
+	ChannelState::WaitingForBatch as u32;
 
 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 
@@ -527,12 +545,15 @@ pub(super) struct ReestablishResponses {
 
 /// The return type of `force_shutdown`
 ///
-/// Contains a (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
-/// followed by a list of HTLCs to fail back in the form of the (source, payment hash, and this
-/// channel's counterparty_node_id and channel_id).
+/// Contains a tuple with the following:
+/// - An optional (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
+/// - A list of HTLCs to fail back in the form of the (source, payment hash, and this channel's
+///   counterparty_node_id and channel_id).
+/// - An optional transaction id identifying a corresponding batch funding transaction.
 pub(crate) type ShutdownResult = (
 	Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
-	Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>
+	Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
+	Option<Txid>
 );
 
 /// If the majority of the channels funds are to the fundee and the initiator holds only just
@@ -594,6 +615,9 @@ pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 /// exceeding this age limit will be force-closed and purged from memory.
 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
 
+/// Number of blocks needed for an output from a coinbase transaction to be spendable.
+pub(crate) const COINBASE_MATURITY: u32 = 100;
+
 struct PendingChannelMonitorUpdate {
 	update: ChannelMonitorUpdate,
 }
@@ -602,6 +626,35 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
 	(0, update, required),
 });
 
+/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
+/// its variants containing an appropriate channel struct.
+pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
+	UnfundedOutboundV1(OutboundV1Channel<SP>),
+	UnfundedInboundV1(InboundV1Channel<SP>),
+	Funded(Channel<SP>),
+}
+
+impl<'a, SP: Deref> ChannelPhase<SP> where
+	SP::Target: SignerProvider,
+	<SP::Target as SignerProvider>::Signer: ChannelSigner,
+{
+	pub fn context(&'a self) -> &'a ChannelContext<SP> {
+		match self {
+			ChannelPhase::Funded(chan) => &chan.context,
+			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
+			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+		}
+	}
+
+	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
+		match self {
+			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
+			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
+			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+		}
+	}
+}
+
 /// Contains all state common to unfunded inbound/outbound channels.
 pub(super) struct UnfundedChannelContext {
 	/// A counter tracking how many ticks have elapsed since this unfunded channel was
@@ -789,6 +842,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
 	funding_transaction: Option<Transaction>,
+	is_batch_funding: Option<()>,
 
 	counterparty_cur_commitment_point: Option<PublicKey>,
 	counterparty_prev_commitment_point: Option<PublicKey>,
@@ -913,7 +967,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 
 	/// Returns true if we've ever received a message from the remote end for this Channel
 	pub fn have_received_message(&self) -> bool {
-		self.channel_state > (ChannelState::OurInitSent as u32)
+		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
 	}
 
 	/// Returns true if this channel is fully established and not known to be closing.
@@ -1129,7 +1183,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 
 	// Checks whether we should emit a `ChannelPending` event.
 	pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
-		self.is_funding_initiated() && !self.channel_pending_event_emitted
+		self.is_funding_broadcast() && !self.channel_pending_event_emitted
 	}
 
 	// Returns whether we already emitted a `ChannelPending` event.
@@ -1188,9 +1242,11 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		did_channel_update
 	}
 
-	/// Returns true if funding_created was sent/received.
-	pub fn is_funding_initiated(&self) -> bool {
-		self.channel_state >= ChannelState::FundingSent as u32
+	/// Returns true if funding_signed was sent/received and the
+	/// funding transaction has been broadcast if necessary.
+ pub fn is_funding_broadcast(&self) -> bool { + self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && + self.channel_state & ChannelState::WaitingForBatch as u32 == 0 } /// Transaction nomenclature is somewhat confusing here as there are many different cases - a @@ -1602,6 +1658,14 @@ impl ChannelContext where SP::Target: SignerProvider { let inbound_stats = context.get_inbound_pending_htlc_stats(None); let outbound_stats = context.get_outbound_pending_htlc_stats(None); + let mut balance_msat = context.value_to_self_msat; + for ref htlc in context.pending_inbound_htlcs.iter() { + if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state { + balance_msat += htlc.amount_msat; + } + } + balance_msat -= outbound_stats.pending_htlcs_value_msat; + let outbound_capacity_msat = context.value_to_self_msat .saturating_sub(outbound_stats.pending_htlcs_value_msat) .saturating_sub( @@ -1718,6 +1782,7 @@ impl ChannelContext where SP::Target: SignerProvider { outbound_capacity_msat, next_outbound_htlc_limit_msat: available_capacity_msat, next_outbound_htlc_minimum_msat, + balance_msat, } } @@ -1920,15 +1985,41 @@ impl ChannelContext where SP::Target: SignerProvider { res } - /// Returns transaction if there is pending funding transaction that is yet to broadcast - pub fn unbroadcasted_funding(&self) -> Option { - if self.channel_state & (ChannelState::FundingCreated as u32) != 0 { - self.funding_transaction.clone() + fn if_unbroadcasted_funding(&self, f: F) -> Option + where F: Fn() -> Option { + if self.channel_state & ChannelState::FundingCreated as u32 != 0 || + self.channel_state & ChannelState::WaitingForBatch as u32 != 0 { + f() } else { None } } + /// Returns the transaction if there is a pending funding transaction that is yet to be + /// broadcast. + pub fn unbroadcasted_funding(&self) -> Option { + self.if_unbroadcasted_funding(|| self.funding_transaction.clone()) + } + + /// Returns the transaction ID if there is a pending funding transaction that is yet to be + /// broadcast. + pub fn unbroadcasted_funding_txid(&self) -> Option { + self.if_unbroadcasted_funding(|| + self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid) + ) + } + + /// Returns whether the channel is funded in a batch. + pub fn is_batch_funding(&self) -> bool { + self.is_batch_funding.is_some() + } + + /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be + /// broadcast. + pub fn unbroadcasted_batch_funding_txid(&self) -> Option { + self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding()) + } + /// Gets the latest commitment transaction and any dependent transactions for relay (forcing /// shutdown of this channel - no more calls into this Channel may be made afterwards except /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). 
@@ -1969,10 +2060,11 @@ impl ChannelContext where SP::Target: SignerProvider { })) } else { None } } else { None }; + let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid(); self.channel_state = ChannelState::ShutdownComplete as u32; self.update_time_counter += 1; - (monitor_update, dropped_outbound_htlcs) + (monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid) } } @@ -2033,11 +2125,6 @@ fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_featur (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000 } -// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking -// has been completed, and then turn into a Channel to get compiler-time enforcement of things like -// calling channel_id() before we're set up or things like get_funding_signed on an -// inbound channel. -// // Holder designates channel data owned for the benefit of the user client. // Counterparty designates channel data owned by the another channel participant entity. pub(super) struct Channel where SP::Target: SignerProvider { @@ -2547,7 +2634,11 @@ impl Channel where counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger); assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update! - self.context.channel_state = ChannelState::FundingSent as u32; + if self.context.is_batch_funding() { + self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32; + } else { + self.context.channel_state = ChannelState::FundingSent as u32; + } self.context.cur_holder_commitment_transaction_number -= 1; self.context.cur_counterparty_commitment_transaction_number -= 1; @@ -2558,11 +2649,20 @@ impl Channel where Ok(channel_monitor) } + /// Updates the state of the channel to indicate that all channels in the batch have received + /// funding_signed and persisted their monitors. + /// The funding transaction is consequently allowed to be broadcast, and the channel can be + /// treated as a non-batch channel going forward. + pub fn set_batch_ready(&mut self) { + self.context.is_batch_funding = None; + self.context.channel_state &= !(ChannelState::WaitingForBatch as u32); + } + /// Handles a channel_ready message from our peer. If we've already sent our channel_ready /// and the channel is now usable (and public), this may generate an announcement_signatures to /// reply with. pub fn channel_ready( - &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash, + &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock, logger: &L ) -> Result, ChannelError> where @@ -2585,7 +2685,13 @@ impl Channel where let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state == ChannelState::FundingSent as u32 { + // Our channel_ready shouldn't have been sent if we are waiting for other channels in the + // batch, but we can receive channel_ready messages. 
+ debug_assert!( + non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 || + non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0 + ); + if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 { self.context.channel_state |= ChannelState::TheirChannelReady as u32; } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) { self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS); @@ -2627,7 +2733,7 @@ impl Channel where log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id()); - Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger)) + Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger)) } pub fn update_add_htlc( @@ -3084,7 +3190,7 @@ impl Channel where ) -> (Option, Vec<(HTLCSource, PaymentHash)>) where F::Target: FeeEstimator, L::Target: Logger { - if self.context.channel_state >= ChannelState::ChannelReady as u32 && + if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 { self.free_holding_cell_htlcs(fee_estimator, logger) } else { (None, Vec::new()) } @@ -3558,17 +3664,17 @@ impl Channel where /// resent. /// No further message handling calls may be made until a channel_reestablish dance has /// completed. - pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) where L::Target: Logger { + /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately. + pub fn remove_uncommitted_htlcs_and_mark_paused(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger { assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0); - if self.context.channel_state < ChannelState::FundingSent as u32 { - self.context.channel_state = ChannelState::ShutdownComplete as u32; - return; + if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { + return Err(()); } if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) { // While the below code should be idempotent, it's simpler to just return early, as // redundant disconnect events can fire, though they should be rare. - return; + return Ok(()); } if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed { @@ -3629,6 +3735,7 @@ impl Channel where self.context.channel_state |= ChannelState::PeerDisconnected as u32; log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id()); + Ok(()) } /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted. @@ -3660,7 +3767,7 @@ impl Channel where /// successfully and we should restore normal operation. Returns messages which should be sent /// to the remote side. 
pub fn monitor_updating_restored( - &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash, + &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32 ) -> MonitorRestoreUpdates where @@ -3674,12 +3781,12 @@ impl Channel where // (re-)broadcast the funding transaction as we may have declined to broadcast it when we // first received the funding_signed. let mut funding_broadcastable = - if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { + if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 { self.context.funding_transaction.take() } else { None }; // That said, if the funding transaction is already confirmed (ie we're active with a // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx. - if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) { + if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) { funding_broadcastable = None; } @@ -3701,7 +3808,7 @@ impl Channel where }) } else { None }; - let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger); + let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger); let mut accepted_htlcs = Vec::new(); mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards); @@ -3871,7 +3978,7 @@ impl Channel where /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`]. pub fn channel_reestablish( &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS, - genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock + chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock ) -> Result where L::Target: Logger, @@ -3886,7 +3993,7 @@ impl Channel where if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_local_commitment_number == 0 { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned())); + return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned())); } if msg.next_remote_commitment_number > 0 { @@ -3930,7 +4037,7 @@ impl Channel where let shutdown_msg = self.get_outbound_shutdown(); - let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger); + let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger); if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 { // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's. 
@@ -4182,7 +4289,7 @@ impl Channel where if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned())); } - if self.context.channel_state < ChannelState::FundingSent as u32 { + if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { // Spec says we should fail the connection, not the channel, but that's nonsense, there // are plenty of reasons you may want to fail a channel pre-funding, and spec says you // can do that via error message without getting a connection fail anyway... @@ -4576,7 +4683,7 @@ impl Channel where pub fn is_awaiting_initial_mon_persist(&self) -> bool { if !self.is_awaiting_monitor_update() { return false; } if self.context.channel_state & - !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) + !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 { // If we're not a 0conf channel, we'll be waiting on a monitor update with only // FundingSent set, though our peer could have sent their channel_ready. @@ -4607,7 +4714,7 @@ impl Channel where /// Returns true if our channel_ready has been sent pub fn is_our_channel_ready(&self) -> bool { - (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32 + (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 } /// Returns true if our peer has either initiated or agreed to shut down the channel. @@ -4656,6 +4763,8 @@ impl Channel where return None; } + // Note that we don't include ChannelState::WaitingForBatch as we don't want to send + // channel_ready until the entire batch is ready. let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 { self.context.channel_state |= ChannelState::OurChannelReady as u32; @@ -4668,7 +4777,7 @@ impl Channel where // We got a reorg but not enough to trigger a force close, just ignore. false } else { - if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 { + if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 { // We should never see a funding transaction on-chain until we've received // funding_signed (if we're an outbound channel), or seen funding_generated (if we're // an inbound channel - before that we have no known funding TXID). The fuzzer, @@ -4705,12 +4814,13 @@ impl Channel where /// In the second, we simply return an Err indicating we need to be force-closed now. 
pub fn transactions_confirmed( &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, - genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L + chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L ) -> Result<(Option, Option), ClosureReason> where NS::Target: NodeSigner, L::Target: Logger { + let mut msgs = (None, None); if let Some(funding_txo) = self.context.get_funding_txo() { for &(index_in_block, tx) in txdata.iter() { // Check if the transaction is the expected funding transaction, and if it is, @@ -4734,12 +4844,14 @@ impl Channel where return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() }); } else { if self.context.is_outbound() { - for input in tx.input.iter() { - if input.witness.is_empty() { - // We generated a malleable funding transaction, implying we've - // just exposed ourselves to funds loss to our counterparty. - #[cfg(not(fuzzing))] - panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!"); + if !tx.is_coin_base() { + for input in tx.input.iter() { + if input.witness.is_empty() { + // We generated a malleable funding transaction, implying we've + // just exposed ourselves to funds loss to our counterparty. + #[cfg(not(fuzzing))] + panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!"); + } } } } @@ -4750,14 +4862,21 @@ impl Channel where Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"), } } + // If this is a coinbase transaction and not a 0-conf channel + // we should update our min_depth to 100 to handle coinbase maturity + if tx.is_coin_base() && + self.context.minimum_depth.unwrap_or(0) > 0 && + self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY { + self.context.minimum_depth = Some(COINBASE_MATURITY); + } } // If we allow 1-conf funding, we may need to check for channel_ready here and // send it immediately instead of waiting for a best_block_updated call (which // may have already happened for this block). if let Some(channel_ready) = self.check_get_channel_ready(height) { log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); - let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger); - return Ok((Some(channel_ready), announcement_sigs)); + let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger); + msgs = (Some(channel_ready), announcement_sigs); } } for inp in tx.input.iter() { @@ -4768,7 +4887,7 @@ impl Channel where } } } - Ok((None, None)) + Ok(msgs) } /// When a new block is connected, we check the height of the block against outbound holding @@ -4783,19 +4902,19 @@ impl Channel where /// May return some HTLCs (and their payment_hash) which have timed out and should be failed /// back. 
pub fn best_block_updated( - &mut self, height: u32, highest_header_time: u32, genesis_block_hash: BlockHash, + &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> where NS::Target: NodeSigner, L::Target: Logger { - self.do_best_block_updated(height, highest_header_time, Some((genesis_block_hash, node_signer, user_config)), logger) + self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger) } fn do_best_block_updated( &mut self, height: u32, highest_header_time: u32, - genesis_node_signer: Option<(BlockHash, &NS, &UserConfig)>, logger: &L + chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L ) -> Result<(Option, Vec<(HTLCSource, PaymentHash)>, Option), ClosureReason> where NS::Target: NodeSigner, @@ -4821,15 +4940,15 @@ impl Channel where self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time); if let Some(channel_ready) = self.check_get_channel_ready(height) { - let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { - self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) + let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { + self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id); return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs)); } let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state >= ChannelState::ChannelReady as u32 || + if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 || (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 { let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1; if self.context.funding_tx_confirmation_height == 0 { @@ -4857,13 +4976,13 @@ impl Channel where height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id); // If funding_tx_confirmed_in is unset, the channel must not be active - assert!(non_shutdown_state <= ChannelState::ChannelReady as u32); + assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32); assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0); return Err(ClosureReason::FundingTimedOut); } - let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer { - self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger) + let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer { + self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger) } else { None }; Ok((None, timed_out_htlcs, announcement_sigs)) } @@ -4880,7 +4999,7 @@ impl Channel where // larger. If we don't know that time has moved forward, we can just set it to the last // time we saw and it will be ignored. 
let best_time = self.context.update_time_counter; - match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) { + match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) { Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => { assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?"); assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?"); @@ -4910,7 +5029,7 @@ impl Channel where /// /// [`ChannelReady`]: crate::ln::msgs::ChannelReady fn get_channel_announcement( - &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig, + &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, ) -> Result where NS::Target: NodeSigner { if !self.context.config.announced_channel { return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); @@ -4941,7 +5060,7 @@ impl Channel where } fn get_announcement_sigs( - &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig, + &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig, best_block_height: u32, logger: &L ) -> Option where @@ -4966,7 +5085,7 @@ impl Channel where } log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id()); - let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) { + let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) { Ok(a) => a, Err(e) => { log_trace!(logger, "{:?}", e); @@ -5040,7 +5159,7 @@ impl Channel where /// channel_announcement message which we can broadcast and storing our counterparty's /// signatures for later reconstruction/rebroadcast of the channel_announcement. pub fn announcement_signatures( - &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, + &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, msg: &msgs::AnnouncementSignatures, user_config: &UserConfig ) -> Result where NS::Target: NodeSigner { let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?; @@ -5070,7 +5189,7 @@ impl Channel where /// Gets a signed channel_announcement for this channel, if we previously received an /// announcement_signatures from our counterparty. pub fn get_signed_channel_announcement( - &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig + &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig ) -> Option where NS::Target: NodeSigner { if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height { return None; @@ -5431,17 +5550,20 @@ impl Channel where } } - pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> { - if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 { - return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string())); - } - self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo { + /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually + /// happened. 
+ pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result { + let new_forwarding_info = Some(CounterpartyForwardingInfo { fee_base_msat: msg.contents.fee_base_msat, fee_proportional_millionths: msg.contents.fee_proportional_millionths, cltv_expiry_delta: msg.contents.cltv_expiry_delta }); + let did_change = self.context.counterparty_forwarding_info != new_forwarding_info; + if did_change { + self.context.counterparty_forwarding_info = new_forwarding_info; + } - Ok(()) + Ok(did_change) } /// Begins the shutdown process, getting a message for the remote peer and returning all @@ -5477,7 +5599,7 @@ impl Channel where // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown // script is set, we just force-close and call it a day. let mut chan_closed = false; - if self.context.channel_state < ChannelState::FundingSent as u32 { + if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { chan_closed = true; } @@ -5506,7 +5628,7 @@ impl Channel where // From here on out, we may not fail! self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; - if self.context.channel_state < ChannelState::FundingSent as u32 { + if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 { self.context.channel_state = ChannelState::ShutdownComplete as u32; } else { self.context.channel_state |= ChannelState::LocalShutdownSent as u32; @@ -5729,6 +5851,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { channel_type_features: channel_type.clone() }, funding_transaction: None, + is_batch_funding: None, counterparty_cur_commitment_point: None, counterparty_prev_commitment_point: None, @@ -5789,7 +5912,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { /// Note that channel_id changes during this call! /// Do NOT broadcast the funding transaction until after a successful funding_signed call! /// If an Err is returned, it is a ChannelError::Close. - pub fn get_funding_created(mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) + pub fn get_funding_created(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) -> Result<(Channel, msgs::FundingCreated), (Self, ChannelError)> where L::Target: Logger { if !self.context.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); @@ -5821,7 +5944,17 @@ impl OutboundV1Channel where SP::Target: SignerProvider { self.context.channel_state = ChannelState::FundingCreated as u32; self.context.channel_id = funding_txo.to_channel_id(); + + // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100. + // We can skip this if it is a zero-conf channel. + if funding_transaction.is_coin_base() && + self.context.minimum_depth.unwrap_or(0) > 0 && + self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY { + self.context.minimum_depth = Some(COINBASE_MATURITY); + } + self.context.funding_transaction = Some(funding_transaction); + self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding); let channel = Channel { context: self.context, @@ -5866,7 +5999,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { /// not of our ability to open any channel at all. Thus, on error, we should first call this /// and see if we get a new `OpenChannel` message, otherwise the channel is failed. 
pub(crate) fn maybe_handle_error_without_close( - &mut self, chain_hash: BlockHash, fee_estimator: &LowerBoundedFeeEstimator + &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator ) -> Result where F::Target: FeeEstimator @@ -5898,7 +6031,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { Ok(self.get_open_channel(chain_hash)) } - pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { + pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel { if !self.context.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } @@ -6371,6 +6504,7 @@ impl InboundV1Channel where SP::Target: SignerProvider { channel_type_features: channel_type.clone() }, funding_transaction: None, + is_batch_funding: None, counterparty_cur_commitment_point: Some(msg.first_per_commitment_point), counterparty_prev_commitment_point: None, @@ -6986,6 +7120,7 @@ impl Writeable for Channel where SP::Target: SignerProvider { (31, channel_pending_event_emitted, option), (35, pending_outbound_skimmed_fees, optional_vec), (37, holding_cell_skimmed_fees, optional_vec), + (38, self.context.is_batch_funding, option), }); Ok(()) @@ -7208,7 +7343,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch }; let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?; - let funding_transaction = Readable::read(reader)?; + let funding_transaction: Option = Readable::read(reader)?; let counterparty_cur_commitment_point = Readable::read(reader)?; @@ -7269,6 +7404,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut pending_outbound_skimmed_fees_opt: Option>> = None; let mut holding_cell_skimmed_fees_opt: Option>> = None; + let mut is_batch_funding: Option<()> = None; + read_tlv_fields!(reader, { (0, announcement_sigs, option), (1, minimum_depth, option), @@ -7294,6 +7431,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (31, channel_pending_event_emitted, option), (35, pending_outbound_skimmed_fees_opt, optional_vec), (37, holding_cell_skimmed_fees_opt, optional_vec), + (38, is_batch_funding, option), }); let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id { @@ -7301,7 +7439,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch // If we've gotten to the funding stage of the channel, populate the signer with its // required channel parameters. 
let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS); - if non_shutdown_state >= (ChannelState::FundingCreated as u32) { + if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) { holder_signer.provide_channel_parameters(&channel_parameters); } (channel_keys_id, holder_signer) @@ -7451,6 +7589,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch channel_transaction_parameters: channel_parameters, funding_transaction, + is_batch_funding, counterparty_cur_commitment_point, counterparty_prev_commitment_point, @@ -7495,16 +7634,16 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch #[cfg(test)] mod tests { use std::cmp; + use bitcoin::blockdata::constants::ChainHash; use bitcoin::blockdata::script::{Script, Builder}; use bitcoin::blockdata::transaction::{Transaction, TxOut}; - use bitcoin::blockdata::constants::genesis_block; use bitcoin::blockdata::opcodes; use bitcoin::network::constants::Network; use hex; use crate::ln::PaymentHash; use crate::ln::channelmanager::{self, HTLCSource, PaymentId}; use crate::ln::channel::InitFeatures; - use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; + use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat}; use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS}; use crate::ln::features::ChannelTypeFeatures; use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT}; @@ -7641,7 +7780,7 @@ mod tests { // Now change the fee so we can check that the fee in the open_channel message is the // same as the old fee. fee_est.fee_est = 500; - let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); assert_eq!(open_channel_msg.feerate_per_kw, original_fee); } @@ -7667,7 +7806,7 @@ mod tests { // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. 
- let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); @@ -7683,7 +7822,7 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed @@ -7785,7 +7924,7 @@ mod tests { let seed = [42; 32]; let network = Network::Testnet; let best_block = BestBlock::from_network(network); - let chain_hash = best_block.block_hash(); + let chain_hash = ChainHash::using_genesis_block(network); let keys_provider = test_utils::TestKeysInterface::new(&seed, network); // Go through the flow of opening a channel between two nodes. @@ -7810,7 +7949,7 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed @@ -7818,7 +7957,7 @@ mod tests { // Now disconnect the two nodes and check that the commitment point in // Node B's channel_reestablish message is sane. - node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger); + assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); let msg = node_b_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number @@ -7826,7 +7965,7 @@ mod tests { // Check that the commitment point in Node A's channel_reestablish message // is sane. 
- node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger); + assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok()); let msg = node_a_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number @@ -7865,7 +8004,7 @@ mod tests { let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000; assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64); - let chan_1_open_channel_msg = chan_1.get_open_channel(genesis_block(network).header.block_hash()); + let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network)); // Test that `InboundV1Channel::new` creates a channel with the correct value for // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value, @@ -7946,7 +8085,7 @@ mod tests { let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64); assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve); - let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash()); + let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network)); let mut inbound_node_config = UserConfig::default(); inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32; @@ -7972,7 +8111,7 @@ mod tests { let seed = [42; 32]; let network = Network::Testnet; let best_block = BestBlock::from_network(network); - let chain_hash = genesis_block(network).header.block_hash(); + let chain_hash = ChainHash::using_genesis_block(network); let keys_provider = test_utils::TestKeysInterface::new(&seed, network); // Create Node A's channel pointing to Node B's pubkey @@ -7982,7 +8121,7 @@ mod tests { // Create Node B's channel by receiving Node A's open_channel message // Make sure A's dust limit is as we expect. 
- let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap(); @@ -7998,7 +8137,7 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap(); + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed @@ -8020,7 +8159,7 @@ mod tests { }, signature: Signature::from(unsafe { FFISignature::new() }) }; - node_a_chan.channel_update(&update).unwrap(); + assert!(node_a_chan.channel_update(&update).unwrap()); // The counterparty can send an update with a higher minimum HTLC, but that shouldn't // change our official htlc_minimum_msat. @@ -8033,6 +8172,8 @@ mod tests { }, None => panic!("expected counterparty forwarding info to be Some") } + + assert!(!node_a_chan.channel_update(&update).unwrap()); } #[cfg(feature = "_test_vectors")] @@ -8817,7 +8958,7 @@ mod tests { let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key(); channel_type_features.set_zero_conf_required(); - let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash()); + let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); open_channel_msg.channel_type = Some(channel_type_features); let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, @@ -8860,7 +9001,7 @@ mod tests { &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); - let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); let channel_b = InboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), @@ -8898,7 +9039,7 @@ mod tests { ).unwrap(); // Set `channel_type` to `None` to force the implicit feature negotiation. 
- let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); + let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); open_channel_msg.channel_type = None; // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts @@ -8943,7 +9084,7 @@ mod tests { &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42 ).unwrap(); - let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); + let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone()); let res = InboundV1Channel::<&TestKeysInterface>::new( @@ -8962,7 +9103,7 @@ mod tests { 10000000, 100000, 42, &config, 0, 42 ).unwrap(); - let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash()); + let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)); let channel_b = InboundV1Channel::<&TestKeysInterface>::new( &fee_estimator, &&keys_provider, &&keys_provider, node_id_a, @@ -8978,4 +9119,146 @@ mod tests { ); assert!(res.is_err()); } + + #[test] + fn test_waiting_for_batch() { + let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000}); + let logger = test_utils::TestLogger::new(); + let secp_ctx = Secp256k1::new(); + let seed = [42; 32]; + let network = Network::Testnet; + let best_block = BestBlock::from_network(network); + let chain_hash = ChainHash::using_genesis_block(network); + let keys_provider = test_utils::TestKeysInterface::new(&seed, network); + + let mut config = UserConfig::default(); + // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a + // channel in a batch before all channels are ready. + config.channel_handshake_limits.trust_own_funding_0conf = true; + + // Create a channel from node a to node b that will be part of batch funding. + let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); + let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new( + &feeest, + &&keys_provider, + &&keys_provider, + node_b_node_id, + &channelmanager::provided_init_features(&config), + 10000000, + 100000, + 42, + &config, + 0, + 42, + ).unwrap(); + + let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)); + let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap()); + let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new( + &feeest, + &&keys_provider, + &&keys_provider, + node_b_node_id, + &channelmanager::provided_channel_type_features(&config), + &channelmanager::provided_init_features(&config), + &open_channel_msg, + 7, + &config, + 0, + &&logger, + true, // Allow node b to send a 0conf channel_ready. + ).unwrap(); + + let accept_channel_msg = node_b_chan.accept_inbound_channel(); + node_a_chan.accept_channel( + &accept_channel_msg, + &config.channel_handshake_limits, + &channelmanager::provided_init_features(&config), + ).unwrap(); + + // Fund the channel with a batch funding transaction. 
+ let output_script = node_a_chan.context.get_funding_redeemscript(); + let tx = Transaction { + version: 1, + lock_time: PackedLockTime::ZERO, + input: Vec::new(), + output: vec![ + TxOut { + value: 10000000, script_pubkey: output_script.clone(), + }, + TxOut { + value: 10000000, script_pubkey: Builder::new().into_script(), + }, + ]}; + let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; + let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created( + tx.clone(), + funding_outpoint, + true, + &&logger, + ).map_err(|_| ()).unwrap(); + let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created( + &funding_created_msg, + best_block, + &&keys_provider, + &&logger, + ).map_err(|_| ()).unwrap(); + let node_b_updates = node_b_chan.monitor_updating_restored( + &&logger, + &&keys_provider, + chain_hash, + &config, + 0, + ); + + // Receive funding_signed, but the channel will be configured to hold sending channel_ready and + // broadcasting the funding transaction until the batch is ready. + let _ = node_a_chan.funding_signed( + &funding_signed_msg, + best_block, + &&keys_provider, + &&logger, + ).unwrap(); + let node_a_updates = node_a_chan.monitor_updating_restored( + &&logger, + &&keys_provider, + chain_hash, + &config, + 0, + ); + // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set, + // as the funding transaction depends on all channels in the batch becoming ready. + assert!(node_a_updates.channel_ready.is_none()); + assert!(node_a_updates.funding_broadcastable.is_none()); + assert_eq!( + node_a_chan.context.channel_state, + ChannelState::FundingSent as u32 | + ChannelState::WaitingForBatch as u32, + ); + + // It is possible to receive a 0conf channel_ready from the remote node. + node_a_chan.channel_ready( + &node_b_updates.channel_ready.unwrap(), + &&keys_provider, + chain_hash, + &config, + &best_block, + &&logger, + ).unwrap(); + assert_eq!( + node_a_chan.context.channel_state, + ChannelState::FundingSent as u32 | + ChannelState::WaitingForBatch as u32 | + ChannelState::TheirChannelReady as u32, + ); + + // Clear the ChannelState::WaitingForBatch only when called by ChannelManager. + node_a_chan.set_batch_ready(); + assert_eq!( + node_a_chan.context.channel_state, + ChannelState::FundingSent as u32 | + ChannelState::TheirChannelReady as u32, + ); + assert!(node_a_chan.check_get_channel_ready(0).is_some()); + } }
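
The new `WaitingForBatch` flag, `set_batch_ready` and `is_funding_broadcast` above interact purely through bit arithmetic on `channel_state`. The following standalone sketch models that interaction so the hold-back behaviour can be seen in isolation; only `WaitingForBatch = 1 << 13` is taken from the patch, while the other flag values, the struct and the reduced state mask are stand-ins chosen for illustration, not LDK's actual `ChannelContext`.

// Stand-in flag values; the real `ChannelState` discriminants are defined in channel.rs.
const FUNDING_SENT: u32 = 1 << 3;
const THEIR_CHANNEL_READY: u32 = 1 << 4;
const WAITING_FOR_BATCH: u32 = 1 << 13;

struct BatchChannelState {
	channel_state: u32,
	is_batch_funding: Option<()>,
}

impl BatchChannelState {
	/// Mirrors the patched `funding_signed` path: a batch-funded channel moves to
	/// `FundingSent | WaitingForBatch` instead of plain `FundingSent`.
	fn on_funding_signed(&mut self) {
		self.channel_state = if self.is_batch_funding.is_some() {
			FUNDING_SENT | WAITING_FOR_BATCH
		} else {
			FUNDING_SENT
		};
	}

	/// Mirrors `Channel::set_batch_ready`: once every channel in the batch has persisted its
	/// monitor, the batch marker and the hold-back flag are cleared together.
	fn set_batch_ready(&mut self) {
		self.is_batch_funding = None;
		self.channel_state &= !WAITING_FOR_BATCH;
	}

	/// Mirrors `is_funding_broadcast`, with a reduced stand-in for `STATE_FLAGS`: the funding
	/// transaction only counts as broadcastable once we are at least at `FundingSent` *and*
	/// no longer holding back for the batch.
	fn is_funding_broadcast(&self) -> bool {
		self.channel_state & !(THEIR_CHANNEL_READY | WAITING_FOR_BATCH) >= FUNDING_SENT
			&& self.channel_state & WAITING_FOR_BATCH == 0
	}
}

fn main() {
	let mut chan = BatchChannelState { channel_state: 0, is_batch_funding: Some(()) };
	chan.on_funding_signed();
	assert!(!chan.is_funding_broadcast()); // held back: the rest of the batch is not ready yet
	chan.channel_state |= THEIR_CHANNEL_READY; // a 0-conf peer may still send channel_ready
	assert!(!chan.is_funding_broadcast());
	chan.set_batch_ready();
	assert!(chan.is_funding_broadcast()); // now safe to broadcast the shared funding transaction
}

This is the same ordering the new `test_waiting_for_batch` exercises: `FundingSent | WaitingForBatch` after `funding_signed`, an optional `TheirChannelReady`, and finally `set_batch_ready` clearing the hold before `check_get_channel_ready` can fire.
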
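The extra `Option<Txid>` now carried in `ShutdownResult` comes from `unbroadcasted_batch_funding_txid`, which only reports a funding txid while the transaction is still unbroadcast and the channel is batch-funded. Below is a minimal sketch of that filtering, with a stand-in `Txid` type and a single boolean standing in for the `FundingCreated`/`WaitingForBatch` state checks.

#[derive(Clone, Copy, PartialEq, Debug)]
struct Txid(u64); // stand-in for bitcoin::Txid

struct FundingInfo {
	funding_txid: Option<Txid>,
	held_pre_broadcast: bool, // stand-in for the FundingCreated / WaitingForBatch flag checks
	is_batch_funding: Option<()>,
}

impl FundingInfo {
	/// Mirrors `unbroadcasted_funding_txid`: only report a txid while the funding transaction
	/// has not yet been broadcast.
	fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
		if self.held_pre_broadcast { self.funding_txid } else { None }
	}

	/// Mirrors `unbroadcasted_batch_funding_txid`: the same txid, but only for batch-funded
	/// channels, so the caller can tell that an entire batch funding transaction is affected.
	fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
		self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding.is_some())
	}
}

fn main() {
	let chan = FundingInfo {
		funding_txid: Some(Txid(42)),
		held_pre_broadcast: true,
		is_batch_funding: Some(()),
	};
	assert_eq!(chan.unbroadcasted_batch_funding_txid(), Some(Txid(42)));

	// A non-batch channel never reports a batch funding txid.
	let single = FundingInfo { is_batch_funding: None, ..chan };
	assert_eq!(single.unbroadcasted_batch_funding_txid(), None);
}

Surfacing this value through `force_shutdown`'s return type is what lets the caller react when one channel of a still-unbroadcast batch is closed early.
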
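Both `get_funding_created` and `transactions_confirmed` now bump `minimum_depth` when the funding transaction is a coinbase transaction. The rule is small enough to state as a pure function; this is a sketch of the check as written in the patch (the `COINBASE_MATURITY` constant is taken from it), not the methods themselves.

/// Number of blocks before a coinbase output is spendable, as defined in the patch.
const COINBASE_MATURITY: u32 = 100;

/// A coinbase-funded, non-0conf channel must wait for coinbase maturity, so any configured
/// minimum depth in 1..100 is raised to 100; 0-conf (depth 0) channels are left untouched.
fn adjust_minimum_depth(funding_is_coinbase: bool, minimum_depth: Option<u32>) -> Option<u32> {
	if funding_is_coinbase
		&& minimum_depth.unwrap_or(0) > 0
		&& minimum_depth.unwrap_or(0) < COINBASE_MATURITY
	{
		Some(COINBASE_MATURITY)
	} else {
		minimum_depth
	}
}

fn main() {
	assert_eq!(adjust_minimum_depth(true, Some(6)), Some(100)); // raised to coinbase maturity
	assert_eq!(adjust_minimum_depth(true, Some(0)), Some(0));   // 0-conf channels are untouched
	assert_eq!(adjust_minimum_depth(false, Some(6)), Some(6));  // non-coinbase funding unaffected
}
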
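The new `balance_msat` field of `AvailableBalances` is computed from `value_to_self_msat`, the inbound HTLCs we are in the middle of claiming, and the value locked up in pending outbound HTLCs. A simplified sketch of that arithmetic follows; the `being_fulfilled` flag is a stand-in for matching `InboundHTLCState::LocalRemoved(Fulfill(_))` in the real code.

// Stand-in for an entry in `pending_inbound_htlcs`.
struct InboundHtlc { amount_msat: u64, being_fulfilled: bool }

/// Mirrors the new `balance_msat` computation in `get_available_balances`: our current
/// to-self value, plus inbound HTLCs we have already fulfilled but not yet fully removed,
/// minus the value of all pending outbound HTLCs.
fn balance_msat(
	value_to_self_msat: u64,
	pending_inbound_htlcs: &[InboundHtlc],
	pending_outbound_htlcs_value_msat: u64,
) -> u64 {
	let mut balance = value_to_self_msat;
	for htlc in pending_inbound_htlcs {
		if htlc.being_fulfilled {
			balance += htlc.amount_msat;
		}
	}
	balance - pending_outbound_htlcs_value_msat
}

fn main() {
	let inbound = [
		InboundHtlc { amount_msat: 10_000, being_fulfilled: true },
		InboundHtlc { amount_msat: 5_000, being_fulfilled: false },
	];
	// 1_000_000 msat to us, 10_000 msat incoming being claimed, 20_000 msat still pending outbound.
	assert_eq!(balance_msat(1_000_000, &inbound, 20_000), 990_000);
}
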
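`channel_update` now returns `Ok(bool)` indicating whether the counterparty's forwarding parameters actually changed, which the updated test exercises by asserting `true` on the first update and `false` on a repeat. A sketch of that compare-then-store pattern, using a stand-in forwarding-info struct rather than LDK's `CounterpartyForwardingInfo`:

#[derive(PartialEq, Clone)]
struct ForwardingInfo { fee_base_msat: u32, fee_proportional_millionths: u32, cltv_expiry_delta: u16 }

/// Mirrors the reworked `channel_update`: the counterparty's forwarding parameters are only
/// stored when they actually differ, and the boolean tells the caller whether anything changed
/// (so a redundant re-broadcast of the same update can be ignored upstream).
fn apply_channel_update(current: &mut Option<ForwardingInfo>, new_info: ForwardingInfo) -> bool {
	let new_info = Some(new_info);
	let did_change = *current != new_info;
	if did_change {
		*current = new_info;
	}
	did_change
}

fn main() {
	let mut stored = None;
	let update = ForwardingInfo {
		fee_base_msat: 1000,
		fee_proportional_millionths: 100,
		cltv_expiry_delta: 144,
	};
	assert!(apply_channel_update(&mut stored, update.clone())); // first update changes state
	assert!(!apply_channel_update(&mut stored, update));        // identical update is a no-op
}
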