X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=3a324e6f4d8856197e8351ef675809aa48a71825;hb=adc1b55a6fa064852d838ceb91b11e6b228d169e;hp=39e07bed8c49ed022d667b10b4ddfcb74d39334b;hpb=15dbe55e676e858ddfde774c9e24a2ede4421ec5;p=rust-lightning diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 39e07bed..3a324e6f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -25,7 +25,7 @@ use bitcoin::secp256k1; use crate::ln::{PaymentPreimage, PaymentHash}; use crate::ln::features::{ChannelTypeFeatures, InitFeatures}; use crate::ln::msgs; -use crate::ln::msgs::{DecodeError, OptionalField, DataLossProtect}; +use crate::ln::msgs::DecodeError; use crate::ln::script::{self, ShutdownScript}; use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT}; use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction}; @@ -35,7 +35,7 @@ use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; +use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; use crate::events::ClosureReason; use crate::routing::gossip::NodeId; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter}; @@ -312,9 +312,9 @@ pub(super) enum ChannelUpdateStatus { /// We've announced the channel as enabled and are connected to our peer. Enabled, /// Our channel is no longer live, but we haven't announced the channel as disabled yet. - DisabledStaged, + DisabledStaged(u8), /// Our channel is live again, but we haven't announced the channel as enabled yet. - EnabledStaged, + EnabledStaged(u8), /// We've announced the channel as disabled. Disabled, } @@ -499,6 +499,7 @@ pub(super) struct Channel { user_id: u128, channel_id: [u8; 32], + temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115. channel_state: u32, // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to @@ -652,7 +653,7 @@ pub(super) struct Channel { pub counterparty_max_accepted_htlcs: u16, #[cfg(not(test))] counterparty_max_accepted_htlcs: u16, - //implied by OUR_MAX_HTLCS: max_accepted_htlcs: u16, + holder_max_accepted_htlcs: u16, minimum_depth: Option, counterparty_forwarding_info: Option, @@ -729,6 +730,9 @@ pub(super) struct Channel { // blinded paths instead of simple scid+node_id aliases. outbound_scid_alias: u64, + // We track whether we already emitted a `ChannelPending` event. + channel_pending_event_emitted: bool, + // We track whether we already emitted a `ChannelReady` event. 
channel_ready_event_emitted: bool, @@ -752,7 +756,7 @@ struct CommitmentTxInfoCached { feerate: u32, } -pub const OUR_MAX_HTLCS: u16 = 50; //TODO +pub const DEFAULT_MAX_HTLCS: u16 = 50; pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 { const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; @@ -982,7 +986,10 @@ impl Channel { secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - Some(signer_provider.get_shutdown_scriptpubkey()) + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}), + } } else { None }; if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { @@ -991,6 +998,13 @@ impl Channel { } } + let destination_script = match signer_provider.get_destination_script() { + Ok(script) => script, + Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), + }; + + let temporary_channel_id = entropy_source.get_secure_random_bytes(); + Ok(Channel { user_id, @@ -1004,7 +1018,8 @@ impl Channel { inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()), - channel_id: entropy_source.get_secure_random_bytes(), + channel_id: temporary_channel_id, + temporary_channel_id: Some(temporary_channel_id), channel_state: ChannelState::OurInitSent as u32, announcement_sigs_state: AnnouncementSigsState::NotSent, secp_ctx, @@ -1014,7 +1029,7 @@ impl Channel { holder_signer, shutdown_scriptpubkey, - destination_script: signer_provider.get_destination_script(), + destination_script, cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, @@ -1065,6 +1080,7 @@ impl Channel { counterparty_htlc_minimum_msat: 0, holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, counterparty_max_accepted_htlcs: 0, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), minimum_depth: None, // Filled in in accept_channel counterparty_forwarding_info: None, @@ -1103,6 +1119,7 @@ impl Channel { latest_inbound_scid_alias: None, outbound_scid_alias, + channel_pending_event_emitted: false, channel_ready_event_emitted: false, #[cfg(any(test, fuzzing))] @@ -1305,7 +1322,7 @@ impl Channel { let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { match &msg.shutdown_scriptpubkey { - &OptionalField::Present(ref script) => { + &Some(ref script) => { // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything if script.len() == 0 { None @@ -1317,14 +1334,17 @@ impl Channel { } }, // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &OptionalField::Absent => { + &None => { return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); } } } else { None }; let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - Some(signer_provider.get_shutdown_scriptpubkey()) + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())), + } } else { None }; if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { @@ -1333,6 +1353,11 @@ impl Channel { } } + let destination_script = match signer_provider.get_destination_script() { + Ok(script) => script, + Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())), + }; + let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); @@ -1350,6 +1375,7 @@ impl Channel { inbound_handshake_limits_override: None, channel_id: msg.temporary_channel_id, + temporary_channel_id: Some(msg.temporary_channel_id), channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32), announcement_sigs_state: AnnouncementSigsState::NotSent, secp_ctx, @@ -1358,7 +1384,7 @@ impl Channel { holder_signer, shutdown_scriptpubkey, - destination_script: signer_provider.get_destination_script(), + destination_script, cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, @@ -1410,6 +1436,7 @@ impl Channel { counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, counterparty_max_accepted_htlcs: msg.max_accepted_htlcs, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)), counterparty_forwarding_info: None, @@ -1451,6 +1478,7 @@ impl Channel { latest_inbound_scid_alias: None, outbound_scid_alias, + channel_pending_event_emitted: false, channel_ready_event_emitted: false, #[cfg(any(test, fuzzing))] @@ -2179,7 +2207,7 @@ impl Channel { let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { match &msg.shutdown_scriptpubkey { - &OptionalField::Present(ref script) => { + &Some(ref script) => { // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything if script.len() == 0 { None @@ -2191,7 +2219,7 @@ impl Channel { } }, // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &OptionalField::Absent => { + &None => { return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); } } @@ -2864,8 +2892,8 @@ impl Channel { let inbound_stats = self.get_inbound_pending_htlc_stats(None); let outbound_stats = self.get_outbound_pending_htlc_stats(None); - if inbound_stats.pending_htlcs + 1 > OUR_MAX_HTLCS as u32 { - return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS))); + if inbound_stats.pending_htlcs + 1 > self.holder_max_accepted_htlcs as u32 { + return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.holder_max_accepted_htlcs))); } if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.holder_max_htlc_value_in_flight_msat { return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.holder_max_htlc_value_in_flight_msat))); @@ -3124,9 +3152,24 @@ impl Channel { return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))); } - // TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the update + // Up to LDK 0.0.115, HTLC information was required to be duplicated in the + // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed + // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of + // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for + // backwards compatibility, we never use it in production. To provide test coverage, here, + // we randomly decide (in test/fuzzing builds) to use the new vec sometimes. + #[allow(unused_assignments, unused_mut)] + let mut separate_nondust_htlc_sources = false; + #[cfg(all(feature = "std", any(test, fuzzing)))] { + use core::hash::{BuildHasher, Hasher}; + // Get a random value using the only std API to do so - the DefaultHasher + let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish(); + separate_nondust_htlc_sources = rand_val % 2 == 0; + } + + let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len()); let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len()); - for (idx, (htlc, source)) in htlcs_cloned.drain(..).enumerate() { + for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() { if let Some(_) = htlc.transaction_output_index { let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw, self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(), @@ -3141,10 +3184,18 @@ impl Channel { if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) { return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())); } - htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source)); + if !separate_nondust_htlc_sources { + htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take())); + } } else { - htlcs_and_sigs.push((htlc, None, source)); + htlcs_and_sigs.push((htlc, None, source_opt.take())); + } + if separate_nondust_htlc_sources { + if let Some(source) = source_opt.take() { + nondust_htlc_sources.push(source); + } } + debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere"); } let holder_commitment_tx = HolderCommitmentTransaction::new( @@ -3207,6 +3258,7 @@ impl Channel { commitment_tx: holder_commitment_tx, 
htlc_outputs: htlcs_and_sigs, claimed_htlcs, + nondust_htlc_sources, }] }; @@ -4007,32 +4059,27 @@ impl Channel { } if msg.next_remote_commitment_number > 0 { - match msg.data_loss_protect { - OptionalField::Present(ref data_loss) => { - let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx); - let given_secret = SecretKey::from_slice(&data_loss.your_last_per_commitment_secret) - .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; - if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); - } - if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number { - macro_rules! log_and_panic { - ($err_msg: expr) => { - log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); - panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); - } - } - log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ - This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\ - More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\ - If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\ - ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\ - ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\ - Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\ - See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info."); + let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx); + let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) + .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; + if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) { + return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); + } + if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number { + macro_rules! 
log_and_panic { + ($err_msg: expr) => { + log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); + panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); } - }, - OptionalField::Absent => {} + } + log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ + This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\ + More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\ + If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\ + ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\ + ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\ + Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\ + See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info."); } } @@ -4319,7 +4366,10 @@ impl Channel { Some(_) => false, None => { assert!(send_shutdown); - let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey(); + let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => scriptpubkey, + Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())), + }; if !shutdown_scriptpubkey.is_compatible(their_features) { return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); } @@ -4554,6 +4604,13 @@ impl Channel { self.channel_id } + // Return the `temporary_channel_id` used during channel establishment. + // + // Will return `None` for channels created prior to LDK version 0.0.115. + pub fn temporary_channel_id(&self) -> Option<[u8; 32]> { + self.temporary_channel_id + } + pub fn minimum_depth(&self) -> Option { self.minimum_depth } @@ -4698,6 +4755,21 @@ impl Channel { self.prev_config.map(|prev_config| prev_config.0) } + // Checks whether we should emit a `ChannelPending` event. + pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool { + self.is_funding_initiated() && !self.channel_pending_event_emitted + } + + // Returns whether we already emitted a `ChannelPending` event. + pub(crate) fn channel_pending_event_emitted(&self) -> bool { + self.channel_pending_event_emitted + } + + // Remembers that we already emitted a `ChannelPending` event. + pub(crate) fn set_channel_pending_event_emitted(&mut self) { + self.channel_pending_event_emitted = true; + } + // Checks whether we should emit a `ChannelReady` event. 
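The three `ChannelPending` helpers above give the caller a one-shot gate: the event should be emitted the first time funding has been initiated, and never again once the flag is latched (the flag itself is persisted via a new odd TLV later in this patch). A minimal self-contained sketch of that pattern; `Event`, `ChannelFlags` and `maybe_emit_channel_pending` are illustrative placeholders, not LDK's actual `ChannelManager` event queue:

    // Placeholder types; only the flag-latching pattern mirrors the helpers above.
    enum Event { ChannelPending { channel_id: [u8; 32] } }

    struct ChannelFlags {
        funding_initiated: bool,
        channel_pending_event_emitted: bool,
    }

    impl ChannelFlags {
        fn should_emit_channel_pending_event(&self) -> bool {
            self.funding_initiated && !self.channel_pending_event_emitted
        }
    }

    fn maybe_emit_channel_pending(chan: &mut ChannelFlags, channel_id: [u8; 32], pending_events: &mut Vec<Event>) {
        if chan.should_emit_channel_pending_event() {
            pending_events.push(Event::ChannelPending { channel_id });
            // Mirrors `set_channel_pending_event_emitted()`: the event fires at most once per channel.
            chan.channel_pending_event_emitted = true;
        }
    }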
pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool { self.is_usable() && !self.channel_ready_event_emitted @@ -5257,7 +5329,7 @@ impl Channel { htlc_minimum_msat: self.holder_htlc_minimum_msat, feerate_per_kw: self.feerate_per_kw as u32, to_self_delay: self.get_holder_selected_contest_delay(), - max_accepted_htlcs: OUR_MAX_HTLCS, + max_accepted_htlcs: self.holder_max_accepted_htlcs, funding_pubkey: keys.funding_pubkey, revocation_basepoint: keys.revocation_basepoint, payment_point: keys.payment_point, @@ -5265,7 +5337,7 @@ impl Channel { htlc_basepoint: keys.htlc_basepoint, first_per_commitment_point, channel_flags: if self.config.announced_channel {1} else {0}, - shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey { + shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), }), @@ -5324,14 +5396,14 @@ impl Channel { htlc_minimum_msat: self.holder_htlc_minimum_msat, minimum_depth: self.minimum_depth.unwrap(), to_self_delay: self.get_holder_selected_contest_delay(), - max_accepted_htlcs: OUR_MAX_HTLCS, + max_accepted_htlcs: self.holder_max_accepted_htlcs, funding_pubkey: keys.funding_pubkey, revocation_basepoint: keys.revocation_basepoint, payment_point: keys.payment_point, delayed_payment_basepoint: keys.delayed_payment_basepoint, htlc_basepoint: keys.htlc_basepoint, first_per_commitment_point, - shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey { + shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), }), @@ -5593,19 +5665,13 @@ impl Channel { // valid, and valid in fuzzing mode's arbitrary validity criteria: let mut pk = [2; 33]; pk[1] = 0xff; let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); - let data_loss_protect = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { + let remote_last_secret = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap(); log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id())); - OptionalField::Present(DataLossProtect { - your_last_per_commitment_secret: remote_last_secret, - my_current_per_commitment_point: dummy_pubkey - }) + remote_last_secret } else { log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id())); - OptionalField::Present(DataLossProtect { - your_last_per_commitment_secret: [0;32], - my_current_per_commitment_point: dummy_pubkey, - }) + [0;32] }; msgs::ChannelReestablish { channel_id: self.channel_id(), @@ -5627,7 +5693,11 @@ impl Channel { // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't // overflow here. 
next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number - 1,
-			data_loss_protect,
+			your_last_per_commitment_secret: remote_last_secret,
+			my_current_per_commitment_point: dummy_pubkey,
+			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction construction but have not received `tx_signatures`
+			// we MUST set `next_funding_txid` to the txid of that interactive transaction, else we MUST NOT set it.
+			next_funding_txid: None,
 		}
 	}
@@ -5973,7 +6043,7 @@ impl Channel {
 	/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
 	/// [`ChannelMonitorUpdate`] will be returned).
 	pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
-		target_feerate_sats_per_kw: Option<u32>)
+		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
 	-> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
 	where SP::Target: SignerProvider {
 		for htlc in self.pending_outbound_htlcs.iter() {
@@ -5989,6 +6059,9 @@ impl Channel {
 				return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
 			}
 		}
+		if self.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+		}
 		assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
 		if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
 			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
@@ -6004,7 +6077,17 @@ impl Channel {
 		let update_shutdown_script = match self.shutdown_scriptpubkey {
 			Some(_) => false,
 			None if !chan_closed => {
-				let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+				// use override shutdown script if provided
+				let shutdown_scriptpubkey = match override_shutdown_script {
+					Some(script) => script,
+					None => {
+						// otherwise, use the shutdown scriptpubkey provided by the signer
+						match signer_provider.get_shutdown_scriptpubkey() {
+							Ok(scriptpubkey) => scriptpubkey,
+							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+						}
+					},
+				};
 				if !shutdown_scriptpubkey.is_compatible(their_features) {
 					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
 				}
@@ -6135,8 +6218,8 @@ impl Writeable for ChannelUpdateStatus {
 		// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
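The reworked `get_shutdown` above accepts an optional caller-supplied shutdown script, falls back to the (now fallible) signer-provided one, and refuses an override once a script has already been committed. A self-contained sketch of that selection order, with placeholder `ShutdownScript` and error types standing in for the LDK ones:

    // Placeholder types; only the precedence logic mirrors `get_shutdown` above.
    #[derive(Clone, Debug)]
    struct ShutdownScript(Vec<u8>);

    #[derive(Debug)]
    enum ShutdownError {
        AlreadySet,        // corresponds to the APIMisuseError path above
        SignerUnavailable, // corresponds to the ChannelUnavailable path above
    }

    // Returns the script to commit to, or None if one is already set and should be reused.
    fn select_shutdown_script(
        existing: Option<&ShutdownScript>,
        override_script: Option<ShutdownScript>,
        from_signer: impl FnOnce() -> Result<ShutdownScript, ()>,
    ) -> Result<Option<ShutdownScript>, ShutdownError> {
        if existing.is_some() && override_script.is_some() {
            // Can't override a shutdown script the channel has already committed to.
            return Err(ShutdownError::AlreadySet);
        }
        if existing.is_some() {
            return Ok(None);
        }
        match override_script {
            Some(script) => Ok(Some(script)),
            None => from_signer().map(Some).map_err(|_| ShutdownError::SignerUnavailable),
        }
    }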
match self { ChannelUpdateStatus::Enabled => 0u8.write(writer)?, - ChannelUpdateStatus::DisabledStaged => 0u8.write(writer)?, - ChannelUpdateStatus::EnabledStaged => 1u8.write(writer)?, + ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?, + ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?, ChannelUpdateStatus::Disabled => 1u8.write(writer)?, } Ok(()) @@ -6432,6 +6515,7 @@ impl Writeable for Channel { if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config) { Some(self.holder_max_htlc_value_in_flight_msat) } else { None }; + let channel_pending_event_emitted = Some(self.channel_pending_event_emitted); let channel_ready_event_emitted = Some(self.channel_ready_event_emitted); // `user_id` used to be a single u64 value. In order to remain backwards compatible with @@ -6439,6 +6523,8 @@ impl Writeable for Channel { // we write the high bytes as an option here. let user_id_high_opt = Some((self.user_id >> 64) as u64); + let holder_max_accepted_htlcs = if self.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.holder_max_accepted_htlcs) }; + write_tlv_fields!(writer, { (0, self.announcement_sigs, option), // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a @@ -6464,6 +6550,9 @@ impl Writeable for Channel { (23, channel_ready_event_emitted, option), (25, user_id_high_opt, option), (27, self.channel_keys_id, required), + (28, holder_max_accepted_htlcs, option), + (29, self.temporary_channel_id, option), + (31, channel_pending_event_emitted, option), }); Ok(()) @@ -6530,7 +6619,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let value_to_self_msat = Readable::read(reader)?; let pending_inbound_htlc_count: u64 = Readable::read(reader)?; - let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize)); + + let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize)); for _ in 0..pending_inbound_htlc_count { pending_inbound_htlcs.push(InboundHTLCOutput { htlc_id: Readable::read(reader)?, @@ -6548,7 +6638,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch } let pending_outbound_htlc_count: u64 = Readable::read(reader)?; - let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, OUR_MAX_HTLCS as usize)); + let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize)); for _ in 0..pending_outbound_htlc_count { pending_outbound_htlcs.push(OutboundHTLCOutput { htlc_id: Readable::read(reader)?, @@ -6577,7 +6667,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch } let holding_cell_htlc_update_count: u64 = Readable::read(reader)?; - let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, OUR_MAX_HTLCS as usize*2)); + let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2)); for _ in 0..holding_cell_htlc_update_count { holding_cell_htlc_updates.push(match ::read(reader)? 
{ 0 => HTLCUpdateAwaitingACK::AddHTLC { @@ -6610,13 +6700,13 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let monitor_pending_commitment_signed = Readable::read(reader)?; let monitor_pending_forwards_count: u64 = Readable::read(reader)?; - let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize)); + let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize)); for _ in 0..monitor_pending_forwards_count { monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?)); } let monitor_pending_failures_count: u64 = Readable::read(reader)?; - let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, OUR_MAX_HTLCS as usize)); + let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize)); for _ in 0..monitor_pending_failures_count { monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)); } @@ -6731,10 +6821,13 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent); let mut latest_inbound_scid_alias = None; let mut outbound_scid_alias = None; + let mut channel_pending_event_emitted = None; let mut channel_ready_event_emitted = None; let mut user_id_high_opt: Option = None; let mut channel_keys_id: Option<[u8; 32]> = None; + let mut temporary_channel_id: Option<[u8; 32]> = None; + let mut holder_max_accepted_htlcs: Option = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -6755,6 +6848,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (23, channel_ready_event_emitted, option), (25, user_id_high_opt, option), (27, channel_keys_id, option), + (28, holder_max_accepted_htlcs, option), + (29, temporary_channel_id, option), + (31, channel_pending_event_emitted, option), }); let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id { @@ -6807,6 +6903,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch // separate u64 values. 
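As the comment above notes, `user_id` became a `u128` while the serialization format still writes a legacy `u64`: the low half stays in the old position and the high half travels in an odd TLV (`user_id_high_opt`), so older readers and writers keep working. A small worked example of the split and of the exact reconstruction used on the next line; the helper names are illustrative, the arithmetic matches the read/write paths in this patch:

    fn split_user_id(user_id: u128) -> (u64, Option<u64>) {
        // Low 64 bits go in the legacy field, high 64 bits in the optional TLV.
        (user_id as u64, Some((user_id >> 64) as u64))
    }

    fn reconstruct_user_id(user_id_low: u64, user_id_high_opt: Option<u64>) -> u128 {
        user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64)
    }

    #[test]
    fn user_id_round_trip() {
        let user_id: u128 = (42u128 << 64) | 7;
        let (low, high) = split_user_id(user_id);
        assert_eq!(reconstruct_user_id(low, high), user_id);
        // Old serializations that never wrote the high half still read back unchanged.
        assert_eq!(reconstruct_user_id(7, None), 7);
    }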
let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
 
+		let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
+
 		Ok(Channel {
 			user_id,
 
@@ -6819,6 +6917,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			inbound_handshake_limits_override: None,
 
 			channel_id,
+			temporary_channel_id,
 			channel_state,
 			announcement_sigs_state: announcement_sigs_state.unwrap(),
 			secp_ctx,
@@ -6834,6 +6933,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			cur_counterparty_commitment_transaction_number,
 			value_to_self_msat,
 
+			holder_max_accepted_htlcs,
 			pending_inbound_htlcs,
 			pending_outbound_htlcs,
 			holding_cell_htlc_updates,
@@ -6911,6 +7011,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
 			outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
 
+			channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
 			channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
 
 			#[cfg(any(test, fuzzing))]
@@ -6940,14 +7041,15 @@ mod tests {
 	use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
 	use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
 	use crate::ln::features::ChannelTypeFeatures;
-	use crate::ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
+	use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
 	use crate::ln::script::ShutdownScript;
 	use crate::ln::chan_utils;
 	use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
 	use crate::chain::BestBlock;
 	use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
-	use crate::chain::keysinterface::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
+	use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
 	use crate::chain::transaction::OutPoint;
+	use crate::routing::router::Path;
 	use crate::util::config::UserConfig;
 	use crate::util::enforcing_trait_impls::EnforcingSigner;
 	use crate::util::errors::APIError;
@@ -7010,17 +7112,17 @@ mod tests {
 		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
 
-		fn get_destination_script(&self) -> Script {
+		fn get_destination_script(&self) -> Result<Script, ()> {
 			let secp_ctx = Secp256k1::signing_only();
 			let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
 			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
-			Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script()
+			Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
 		}
 
-		fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+		fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
 			let secp_ctx = Secp256k1::signing_only();
 			let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
-
ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)) + Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key))) } } @@ -7125,11 +7227,10 @@ mod tests { cltv_expiry: 200000000, state: OutboundHTLCState::Committed, source: HTLCSource::OutboundRoute { - path: Vec::new(), + path: Path { hops: Vec::new(), blinded_tail: None }, session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(), first_hop_htlc_msat: 548, payment_id: PaymentId([42; 32]), - payment_secret: None, } }); @@ -7241,12 +7342,7 @@ mod tests { let msg = node_b_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number - match msg.data_loss_protect { - OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => { - assert_eq!(your_last_per_commitment_secret, [0; 32]); - }, - _ => panic!() - } + assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); // Check that the commitment point in Node A's channel_reestablish message // is sane. @@ -7254,12 +7350,7 @@ mod tests { let msg = node_a_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number - match msg.data_loss_protect { - OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => { - assert_eq!(your_last_per_commitment_secret, [0; 32]); - }, - _ => panic!() - } + assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); } #[test] @@ -7441,7 +7532,7 @@ mod tests { } } - #[cfg(not(feature = "grind_signatures"))] + #[cfg(feature = "_test_vectors")] #[test] fn outbound_commitment_test() { use bitcoin::util::sighash; @@ -7450,7 +7541,7 @@ mod tests { use bitcoin::hashes::hex::FromHex; use bitcoin::hash_types::Txid; use bitcoin::secp256k1::Message; - use crate::chain::keysinterface::EcdsaChannelSigner; + use crate::sign::EcdsaChannelSigner; use crate::ln::PaymentPreimage; use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys}; use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; @@ -7474,6 +7565,7 @@ mod tests { [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff], 10_000_000, [0; 32], + [0; 32], ); assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
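For downstream implementors, the change visible in the test `SignerProvider` above is that `get_destination_script` and `get_shutdown_scriptpubkey` now return `Result`s: the happy path wraps its value in `Ok(..)` and callers must handle failure (this patch maps it to a channel/API error). A minimal free-standing helper in the same shape, assuming the external crate paths shown in the imports and a `()` error type:

    use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
    use lightning::ln::script::ShutdownScript;

    // Same construction as the test impl above, expressed as a helper a SignerProvider
    // implementation might delegate to; returning Err(()) would signal an unavailable signer.
    fn p2wpkh_shutdown_script(channel_close_key: &SecretKey) -> Result<ShutdownScript, ()> {
        let secp_ctx = Secp256k1::signing_only();
        Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, channel_close_key)))
    }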