X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=8cb73241562e7187deebd75dd3e3ef8858a9abc6;hb=42e2f1d1a645156180cbcc664989df78175301d2;hp=2d8577e98cf8ba3242f8b49f474c2147e71c8394;hpb=1016e1f605ff03ed14ed875e7cd4f567ae15c96a;p=rust-lightning diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2d8577e9..8cb73241 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -25,7 +25,7 @@ use bitcoin::secp256k1; use crate::ln::{PaymentPreimage, PaymentHash}; use crate::ln::features::{ChannelTypeFeatures, InitFeatures}; use crate::ln::msgs; -use crate::ln::msgs::{DecodeError, OptionalField, DataLossProtect}; +use crate::ln::msgs::DecodeError; use crate::ln::script::{self, ShutdownScript}; use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT}; use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction}; @@ -35,7 +35,7 @@ use crate::chain::BestBlock; use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; +use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient}; use crate::events::ClosureReason; use crate::routing::gossip::NodeId; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter}; @@ -73,6 +73,8 @@ pub struct AvailableBalances { pub outbound_capacity_msat: u64, /// The maximum value we can assign to the next outbound HTLC pub next_outbound_htlc_limit_msat: u64, + /// The minimum value we can assign to the next outbound HTLC + pub next_outbound_htlc_minimum_msat: u64, } #[derive(Debug, Clone, Copy, PartialEq)] @@ -312,9 +314,9 @@ pub(super) enum ChannelUpdateStatus { /// We've announced the channel as enabled and are connected to our peer. Enabled, /// Our channel is no longer live, but we haven't announced the channel as disabled yet. - DisabledStaged, + DisabledStaged(u8), /// Our channel is live again, but we haven't announced the channel as enabled yet. - EnabledStaged, + EnabledStaged(u8), /// We've announced the channel as disabled. Disabled, } @@ -432,6 +434,12 @@ pub(super) struct ReestablishResponses { pub shutdown_msg: Option, } +/// The return type of `force_shutdown` +pub(crate) type ShutdownResult = ( + Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, + Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])> +); + /// If the majority of the channels funds are to the fundee and the initiator holds only just /// enough funds to cover their reserve value, channels are at risk of getting "stuck". 
Because the /// initiator controls the feerate, if they then go to increase the channel fee, they may have no @@ -479,6 +487,28 @@ pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4; /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5; +/// The number of ticks that may elapse while we're waiting for a response to a +/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect +/// them. +/// +/// See [`Channel::sent_message_awaiting_response`] for more information. +pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2; + +struct PendingChannelMonitorUpdate { + update: ChannelMonitorUpdate, + /// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an + /// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is + /// blocked on some external event and the [`ChannelManager`] will update us when we're ready. + /// + /// [`ChannelManager`]: super::channelmanager::ChannelManager + blocked: bool, +} + +impl_writeable_tlv_based!(PendingChannelMonitorUpdate, { + (0, update, required), + (2, blocked, required), +}); + // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking // has been completed, and then turn into a Channel to get compiler-time enforcement of things like // calling channel_id() before we're set up or things like get_outbound_funding_signed on an @@ -653,7 +683,7 @@ pub(super) struct Channel { pub counterparty_max_accepted_htlcs: u16, #[cfg(not(test))] counterparty_max_accepted_htlcs: u16, - //implied by OUR_MAX_HTLCS: max_accepted_htlcs: u16, + holder_max_accepted_htlcs: u16, minimum_depth: Option, counterparty_forwarding_info: Option, @@ -700,6 +730,19 @@ pub(super) struct Channel { /// See-also pub workaround_lnd_bug_4006: Option, + /// An option set when we wish to track how many ticks have elapsed while waiting for a response + /// from our counterparty after sending a message. If the peer has yet to respond after reaching + /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to + /// unblock the state machine. + /// + /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect + /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An + /// example of such can be found at . + /// + /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or + /// [`msgs::RevokeAndACK`] message from the counterparty. + sent_message_awaiting_response: Option, + #[cfg(any(test, fuzzing))] // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the // corresponding HTLC on the inbound path. If, then, the outbound path channel is @@ -744,7 +787,7 @@ pub(super) struct Channel { /// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence /// completes we still need to be able to complete the persistence. Thus, we have to keep a /// copy of the [`ChannelMonitorUpdate`] here until it is complete. 
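The pending-monitor-update queue this field documents is easier to see in isolation. Below is a minimal, self-contained sketch of the queueing idea using simplified stand-in types (not the real ChannelMonitorUpdate): pushing an update reports whether it may be handed to the persister immediately or must be held behind an earlier blocked update.

// Simplified stand-ins; only the queueing behaviour is illustrated here.
struct ChannelMonitorUpdate { update_id: u64 }

struct PendingChannelMonitorUpdate {
    update: ChannelMonitorUpdate,
    // A blocked update must not be given to the persister until some external
    // event completes; everything queued after it is held back as well.
    blocked: bool,
}

#[derive(Default)]
struct MonitorUpdateQueue { pending: Vec<PendingChannelMonitorUpdate> }

impl MonitorUpdateQueue {
    // Returns true when the update can be released right away, i.e. no earlier
    // update in the queue is still blocked.
    fn push_blockable(&mut self, update: ChannelMonitorUpdate) -> bool {
        let release = self.pending.iter().all(|upd| !upd.blocked);
        self.pending.push(PendingChannelMonitorUpdate { update, blocked: !release });
        release
    }

    // Unblocks the first blocked update and returns it, along with whether more
    // blocked updates remain queued behind it.
    fn unblock_next(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
        for i in 0..self.pending.len() {
            if self.pending[i].blocked {
                self.pending[i].blocked = false;
                return Some((&self.pending[i].update, self.pending.len() > i + 1));
            }
        }
        None
    }
}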
- pending_monitor_updates: Vec, + pending_monitor_updates: Vec, } #[cfg(any(test, fuzzing))] @@ -756,7 +799,7 @@ struct CommitmentTxInfoCached { feerate: u32, } -pub const OUR_MAX_HTLCS: u16 = 50; //TODO +pub const DEFAULT_MAX_HTLCS: u16 = 50; pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 { const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; @@ -986,7 +1029,10 @@ impl Channel { secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - Some(signer_provider.get_shutdown_scriptpubkey()) + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}), + } } else { None }; if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { @@ -995,6 +1041,11 @@ impl Channel { } } + let destination_script = match signer_provider.get_destination_script() { + Ok(script) => script, + Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}), + }; + let temporary_channel_id = entropy_source.get_secure_random_bytes(); Ok(Channel { @@ -1021,7 +1072,7 @@ impl Channel { holder_signer, shutdown_scriptpubkey, - destination_script: signer_provider.get_destination_script(), + destination_script, cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, @@ -1072,6 +1123,7 @@ impl Channel { counterparty_htlc_minimum_msat: 0, holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, counterparty_max_accepted_htlcs: 0, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), minimum_depth: None, // Filled in in accept_channel counterparty_forwarding_info: None, @@ -1106,6 +1158,7 @@ impl Channel { next_remote_commitment_tx_fee_info_cached: Mutex::new(None), workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, latest_inbound_scid_alias: None, outbound_scid_alias, @@ -1313,7 +1366,7 @@ impl Channel { let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { match &msg.shutdown_scriptpubkey { - &OptionalField::Present(ref script) => { + &Some(ref script) => { // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything if script.len() == 0 { None @@ -1325,14 +1378,17 @@ impl Channel { } }, // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &OptionalField::Absent => { + &None => { return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); } } } else { None }; let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey { - Some(signer_provider.get_shutdown_scriptpubkey()) + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => Some(scriptpubkey), + Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())), + } } else { None }; if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey { @@ -1341,6 +1397,11 @@ impl Channel { } } + let destination_script = match signer_provider.get_destination_script() { + Ok(script) => script, + Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())), + }; + let mut secp_ctx = Secp256k1::new(); secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes()); @@ -1367,7 +1428,7 @@ impl Channel { holder_signer, shutdown_scriptpubkey, - destination_script: signer_provider.get_destination_script(), + destination_script, cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER, @@ -1419,6 +1480,7 @@ impl Channel { counterparty_htlc_minimum_msat: msg.htlc_minimum_msat, holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat }, counterparty_max_accepted_htlcs: msg.max_accepted_htlcs, + holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS), minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)), counterparty_forwarding_info: None, @@ -1456,6 +1518,7 @@ impl Channel { next_remote_commitment_tx_fee_info_cached: Mutex::new(None), workaround_lnd_bug_4006: None, + sent_message_awaiting_response: None, latest_inbound_scid_alias: None, outbound_scid_alias, @@ -1977,28 +2040,52 @@ impl Channel { } pub fn get_update_fulfill_htlc_and_commit(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger { + let release_cs_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked); match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) { - UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => { - let mut additional_update = self.build_commitment_no_status_check(logger); - // build_commitment_no_status_check may bump latest_monitor_id but we want them to be - // strictly increasing by one, so decrement it here. - self.latest_monitor_update_id = monitor_update.update_id; - monitor_update.updates.append(&mut additional_update.updates); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); - self.pending_monitor_updates.push(monitor_update); + UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => { + // Even if we aren't supposed to let new monitor updates with commitment state + // updates run, we still need to push the preimage ChannelMonitorUpdateStep no + // matter what. Sadly, to push a new monitor update which flies before others + // already queued, we have to insert it into the pending queue and update the + // update_ids of all the following monitors. 
+ let unblocked_update_pos = if release_cs_monitor && msg.is_some() { + let mut additional_update = self.build_commitment_no_status_check(logger); + // build_commitment_no_status_check may bump latest_monitor_id but we want them + // to be strictly increasing by one, so decrement it here. + self.latest_monitor_update_id = monitor_update.update_id; + monitor_update.updates.append(&mut additional_update.updates); + self.pending_monitor_updates.push(PendingChannelMonitorUpdate { + update: monitor_update, blocked: false, + }); + self.pending_monitor_updates.len() - 1 + } else { + let insert_pos = self.pending_monitor_updates.iter().position(|upd| upd.blocked) + .unwrap_or(self.pending_monitor_updates.len()); + let new_mon_id = self.pending_monitor_updates.get(insert_pos) + .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id); + monitor_update.update_id = new_mon_id; + self.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate { + update: monitor_update, blocked: false, + }); + for held_update in self.pending_monitor_updates.iter_mut().skip(insert_pos + 1) { + held_update.update.update_id += 1; + } + if msg.is_some() { + debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set"); + let update = self.build_commitment_no_status_check(logger); + self.pending_monitor_updates.push(PendingChannelMonitorUpdate { + update, blocked: true, + }); + } + insert_pos + }; + self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new()); UpdateFulfillCommitFetch::NewClaim { - monitor_update: self.pending_monitor_updates.last().unwrap(), + monitor_update: &self.pending_monitor_updates.get(unblocked_update_pos) + .expect("We just pushed the monitor update").update, htlc_value_msat, } }, - UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => { - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); - self.pending_monitor_updates.push(monitor_update); - UpdateFulfillCommitFetch::NewClaim { - monitor_update: self.pending_monitor_updates.last().unwrap(), - htlc_value_msat, - } - } UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {}, } } @@ -2189,7 +2276,7 @@ impl Channel { let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { match &msg.shutdown_scriptpubkey { - &OptionalField::Present(ref script) => { + &Some(ref script) => { // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything if script.len() == 0 { None @@ -2201,7 +2288,7 @@ impl Channel { } }, // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &OptionalField::Absent => { + &None => { return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned())); } } @@ -2619,6 +2706,7 @@ impl Channel { /// corner case properly. pub fn get_available_balances(&self) -> AvailableBalances { // Note that we have to handle overflow due to the above case. 
+ let inbound_stats = self.get_inbound_pending_htlc_stats(None); let outbound_stats = self.get_outbound_pending_htlc_stats(None); let mut balance_msat = self.value_to_self_msat; @@ -2629,10 +2717,112 @@ impl Channel { } balance_msat -= outbound_stats.pending_htlcs_value_msat; - let outbound_capacity_msat = cmp::max(self.value_to_self_msat as i64 - - outbound_stats.pending_htlcs_value_msat as i64 - - self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) as i64 * 1000, - 0) as u64; + let outbound_capacity_msat = self.value_to_self_msat + .saturating_sub(outbound_stats.pending_htlcs_value_msat) + .saturating_sub( + self.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000); + + let mut available_capacity_msat = outbound_capacity_msat; + + if self.is_outbound() { + // We should mind channel commit tx fee when computing how much of the available capacity + // can be used in the next htlc. Mirrors the logic in send_htlc. + // + // The fee depends on whether the amount we will be sending is above dust or not, + // and the answer will in turn change the amount itself — making it a circular + // dependency. + // This complicates the computation around dust-values, up to the one-htlc-value. + let mut real_dust_limit_timeout_sat = self.holder_dust_limit_satoshis; + if !self.opt_anchors() { + real_dust_limit_timeout_sat += self.feerate_per_kw as u64 * htlc_timeout_tx_weight(false) / 1000; + } + + let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered); + let max_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_above_dust, Some(())); + let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered); + let min_reserved_commit_tx_fee_msat = FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_dust, Some(())); + + // We will first subtract the fee as if we were above-dust. Then, if the resulting + // value ends up being below dust, we have this fee available again. In that case, + // match the value to right-below-dust. + let mut capacity_minus_commitment_fee_msat: i64 = (available_capacity_msat as i64) - (max_reserved_commit_tx_fee_msat as i64); + if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 { + let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat; + debug_assert!(one_htlc_difference_msat != 0); + capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64; + capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat); + available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64; + } else { + available_capacity_msat = capacity_minus_commitment_fee_msat as u64; + } + } else { + // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure + // sending a new HTLC won't reduce their balance below our reserve threshold. 
+ let mut real_dust_limit_success_sat = self.counterparty_dust_limit_satoshis; + if !self.opt_anchors() { + real_dust_limit_success_sat += self.feerate_per_kw as u64 * htlc_success_tx_weight(false) / 1000; + } + + let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered); + let max_reserved_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_above_dust, None); + + let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000; + let remote_balance_msat = (self.channel_value_satoshis * 1000 - self.value_to_self_msat) + .saturating_sub(inbound_stats.pending_htlcs_value_msat); + + if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat { + // If another HTLC's fee would reduce the remote's balance below the reserve limit + // we've selected for them, we can only send dust HTLCs. + available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1); + } + } + + let mut next_outbound_htlc_minimum_msat = self.counterparty_htlc_minimum_msat; + + // If we get close to our maximum dust exposure, we end up in a situation where we can send + // between zero and the remaining dust exposure limit remaining OR above the dust limit. + // Because we cannot express this as a simple min/max, we prefer to tell the user they can + // send above the dust limit (as the router can always overpay to meet the dust limit). + let mut remaining_msat_below_dust_exposure_limit = None; + let mut dust_exposure_dust_limit_msat = 0; + + let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { + (self.counterparty_dust_limit_satoshis, self.holder_dust_limit_satoshis) + } else { + let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64; + (self.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, + self.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) + }; + let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; + if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 { + remaining_msat_below_dust_exposure_limit = + Some(self.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat)); + dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000); + } + + let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; + if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 { + remaining_msat_below_dust_exposure_limit = Some(cmp::min( + remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()), + self.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat))); + dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000); + } + + if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit { + if available_capacity_msat < dust_exposure_dust_limit_msat { + available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat); + } else { + next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, 
dust_exposure_dust_limit_msat); + } + } + + available_capacity_msat = cmp::min(available_capacity_msat, + self.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat); + + if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 { + available_capacity_msat = 0; + } + AvailableBalances { inbound_capacity_msat: cmp::max(self.channel_value_satoshis as i64 * 1000 - self.value_to_self_msat as i64 @@ -2640,10 +2830,8 @@ impl Channel { - self.holder_selected_channel_reserve_satoshis as i64 * 1000, 0) as u64, outbound_capacity_msat, - next_outbound_htlc_limit_msat: cmp::max(cmp::min(outbound_capacity_msat as i64, - self.counterparty_max_htlc_value_in_flight_msat as i64 - - outbound_stats.pending_htlcs_value_msat as i64), - 0) as u64, + next_outbound_htlc_limit_msat: available_capacity_msat, + next_outbound_htlc_minimum_msat, balance_msat, } } @@ -2667,10 +2855,16 @@ impl Channel { feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 } - // Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the - // number of pending HTLCs that are on track to be in our next commitment tx, plus an additional - // HTLC if `fee_spike_buffer_htlc` is Some, plus a new HTLC given by `new_htlc_amount`. Dust HTLCs - // are excluded. + /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the + /// number of pending HTLCs that are on track to be in our next commitment tx. + /// + /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if + /// `fee_spike_buffer_htlc` is `Some`. + /// + /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the + /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added. + /// + /// Dust HTLCs are excluded. fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { assert!(self.is_outbound()); @@ -2764,10 +2958,16 @@ impl Channel { res } - // Get the commitment tx fee for the remote's next commitment transaction based on the number of - // pending HTLCs that are on track to be in their next commitment tx, plus an additional HTLC if - // `fee_spike_buffer_htlc` is Some, plus a new HTLC given by `new_htlc_amount`. Dust HTLCs are - // excluded. + /// Get the commitment tx fee for the remote's next commitment transaction based on the number of + /// pending HTLCs that are on track to be in their next commitment tx + /// + /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if + /// `fee_spike_buffer_htlc` is `Some`. + /// + /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the + /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added. + /// + /// Dust HTLCs are excluded. 
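The trickiest step in the new get_available_balances above is the above-dust/below-dust adjustment: the commitment fee is first reserved as if the next HTLC were above dust, and only if that pushes the remaining value below the dust limit is the per-HTLC fee difference handed back, with the result capped just below dust. A simplified, self-contained sketch of that arithmetic, assuming the fee and dust values have already been computed:

use std::cmp;

// capacity_msat:   spendable balance before reserving the next HTLC's commitment fee.
// max_fee_msat:    fee reserved if the next HTLC is above dust (includes its weight).
// min_fee_msat:    fee reserved if the next HTLC is dust (no extra weight).
// dust_limit_msat: threshold below which the HTLC adds no commitment-tx output.
fn capacity_after_next_htlc_fee(capacity_msat: u64, max_fee_msat: u64,
    min_fee_msat: u64, dust_limit_msat: u64) -> u64
{
    // Assume the above-dust case first and reserve the larger fee.
    let mut remaining = capacity_msat as i64 - max_fee_msat as i64;
    if remaining < dust_limit_msat as i64 {
        // The HTLC would end up below dust, so the extra per-HTLC fee is not
        // actually owed: give the difference back, but cap the result just
        // below the dust limit so the HTLC really does stay dust.
        remaining += (max_fee_msat - min_fee_msat) as i64;
        remaining = cmp::min(remaining, dust_limit_msat as i64 - 1);
    }
    cmp::max(remaining, 0) as u64
}

In the outbound-channel branch above this is applied against the holder's HTLC-timeout dust limit, since there we are the side paying the commitment fee.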
fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { assert!(!self.is_outbound()); @@ -2874,8 +3074,8 @@ impl Channel { let inbound_stats = self.get_inbound_pending_htlc_stats(None); let outbound_stats = self.get_outbound_pending_htlc_stats(None); - if inbound_stats.pending_htlcs + 1 > OUR_MAX_HTLCS as u32 { - return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS))); + if inbound_stats.pending_htlcs + 1 > self.holder_max_accepted_htlcs as u32 { + return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.holder_max_accepted_htlcs))); } if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.holder_max_htlc_value_in_flight_msat { return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.holder_max_htlc_value_in_flight_msat))); @@ -3066,7 +3266,7 @@ impl Channel { Ok(()) } - pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<&ChannelMonitorUpdate, ChannelError> + pub fn commitment_signed(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result, ChannelError> where L::Target: Logger { if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { @@ -3266,8 +3466,7 @@ impl Channel { } log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id)); - self.pending_monitor_updates.push(monitor_update); - return Ok(self.pending_monitor_updates.last().unwrap()); + return Ok(self.push_ret_blockable_mon_update(monitor_update)); } let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 { @@ -3284,9 +3483,8 @@ impl Channel { log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.", log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" }); - self.pending_monitor_updates.push(monitor_update); self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new()); - return Ok(self.pending_monitor_updates.last().unwrap()); + return Ok(self.push_ret_blockable_mon_update(monitor_update)); } /// Public version of the below, checking relevant preconditions first. @@ -3401,8 +3599,7 @@ impl Channel { update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len()); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); - self.pending_monitor_updates.push(monitor_update); - (Some(self.pending_monitor_updates.last().unwrap()), htlcs_to_fail) + (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) } @@ -3413,7 +3610,7 @@ impl Channel { /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail, /// generating an appropriate error *after* the channel state has been updated based on the /// revoke_and_ack message. 
- pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, &ChannelMonitorUpdate), ChannelError> + pub fn revoke_and_ack(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError> where L::Target: Logger, { if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) { @@ -3472,6 +3669,7 @@ impl Channel { // OK, we step the channel here and *then* if the new generation fails we can fail the // channel based on that, but stepping stuff here should be safe either way. self.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32); + self.sent_message_awaiting_response = None; self.counterparty_prev_commitment_point = self.counterparty_cur_commitment_point; self.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point); self.cur_counterparty_commitment_transaction_number -= 1; @@ -3610,21 +3808,19 @@ impl Channel { self.monitor_pending_failures.append(&mut revoked_htlcs); self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id())); - self.pending_monitor_updates.push(monitor_update); - return Ok((Vec::new(), self.pending_monitor_updates.last().unwrap())); + return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update))); } match self.free_holding_cell_htlcs(logger) { (Some(_), htlcs_to_fail) => { - let mut additional_update = self.pending_monitor_updates.pop().unwrap(); + let mut additional_update = self.pending_monitor_updates.pop().unwrap().update; // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be // strictly increasing by one, so decrement it here. self.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - self.pending_monitor_updates.push(monitor_update); - Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap())) + Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) }, (None, htlcs_to_fail) => { if require_commitment { @@ -3638,13 +3834,11 @@ impl Channel { log_debug!(logger, "Received a valid revoke_and_ack for channel {}. 
Responding with a commitment update with {} HTLCs failed.", log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - self.pending_monitor_updates.push(monitor_update); - Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap())) + Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) } else { log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id())); self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - self.pending_monitor_updates.push(monitor_update); - Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap())) + Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) } } } @@ -3791,6 +3985,8 @@ impl Channel { } } + self.sent_message_awaiting_response = None; + self.channel_state |= ChannelState::PeerDisconnected as u32; log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id())); } @@ -3833,7 +4029,12 @@ impl Channel { { assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32); self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32); - self.pending_monitor_updates.clear(); + let mut found_blocked = false; + self.pending_monitor_updates.retain(|upd| { + if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); } + if upd.blocked { found_blocked = true; } + upd.blocked + }); // If we're past (or at) the FundingSent stage on an outbound channel, try to // (re-)broadcast the funding transaction as we may have declined to broadcast it when we @@ -3888,6 +4089,7 @@ impl Channel { Some(self.get_last_revoke_and_ack()) } else { None }; let commitment_update = if self.monitor_pending_commitment_signed { + self.mark_awaiting_response(); Some(self.get_last_commitment_update(logger)) } else { None }; @@ -4037,36 +4239,31 @@ impl Channel { if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_local_commitment_number == 0 { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish".to_owned())); + return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned())); } if msg.next_remote_commitment_number > 0 { - match msg.data_loss_protect { - OptionalField::Present(ref data_loss) => { - let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx); - let given_secret = SecretKey::from_slice(&data_loss.your_last_per_commitment_secret) - .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; - if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) { - return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); + let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx); + let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret) + 
.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?; + if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) { + return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned())); + } + if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number { + macro_rules! log_and_panic { + ($err_msg: expr) => { + log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); + panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); } - if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number { - macro_rules! log_and_panic { - ($err_msg: expr) => { - log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); - panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id)); - } - } - log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ - This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\ - More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\ - If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\ - ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\ - ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\ - Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\ - See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info."); - } - }, - OptionalField::Absent => {} + } + log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\ + This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\ + More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\ + If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\ + ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\ + ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\ + Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\ + See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info."); } } @@ -4082,6 +4279,7 @@ impl Channel { // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all // remaining cases either succeed or ErrorMessage-fail). 
self.channel_state &= !(ChannelState::PeerDisconnected as u32); + self.sent_message_awaiting_response = None; let shutdown_msg = if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 { assert!(self.shutdown_scriptpubkey.is_some()); @@ -4142,7 +4340,11 @@ impl Channel { // revoke_and_ack, not on sending commitment_signed, so we add one if have // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten // the corresponding revoke_and_ack back yet. - let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }; + let is_awaiting_remote_revoke = self.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0; + if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() { + self.mark_awaiting_response(); + } + let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 }; let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 { // We should never have to worry about MonitorUpdateInProgress resending ChannelReady @@ -4311,6 +4513,28 @@ impl Channel { }), None)) } + // Marks a channel as waiting for a response from the counterparty. If it's not received + // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt + // a reconnection. + fn mark_awaiting_response(&mut self) { + self.sent_message_awaiting_response = Some(0); + } + + /// Determines whether we should disconnect the counterparty due to not receiving a response + /// within our expected timeframe. + /// + /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`]. + pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool { + let ticks_elapsed = if let Some(ticks_elapsed) = self.sent_message_awaiting_response.as_mut() { + ticks_elapsed + } else { + // Don't disconnect when we're not waiting on a response. 
+ return false; + }; + *ticks_elapsed += 1; + *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS + } + pub fn shutdown( &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown ) -> Result<(Option, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError> @@ -4353,7 +4577,10 @@ impl Channel { Some(_) => false, None => { assert!(send_shutdown); - let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey(); + let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => scriptpubkey, + Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())), + }; if !shutdown_scriptpubkey.is_compatible(their_features) { return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey))); } @@ -4376,8 +4603,9 @@ impl Channel { }], }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); - self.pending_monitor_updates.push(monitor_update); - Some(self.pending_monitor_updates.last().unwrap()) + if self.push_blockable_mon_update(monitor_update) { + self.pending_monitor_updates.last().map(|upd| &upd.update) + } else { None } } else { None }; let shutdown = if send_shutdown { Some(msgs::Shutdown { @@ -4949,8 +5177,64 @@ impl Channel { (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 } - pub fn get_next_monitor_update(&self) -> Option<&ChannelMonitorUpdate> { - self.pending_monitor_updates.first() + pub fn get_latest_complete_monitor_update_id(&self) -> u64 { + if self.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); } + self.pending_monitor_updates[0].update.update_id - 1 + } + + /// Returns the next blocked monitor update, if one exists, and a bool which indicates a + /// further blocked monitor update exists after the next. + pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> { + for i in 0..self.pending_monitor_updates.len() { + if self.pending_monitor_updates[i].blocked { + self.pending_monitor_updates[i].blocked = false; + return Some((&self.pending_monitor_updates[i].update, + self.pending_monitor_updates.len() > i + 1)); + } + } + None + } + + /// Pushes a new monitor update into our monitor update queue, returning whether it should be + /// immediately given to the user for persisting or if it should be held as blocked. + fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool { + let release_monitor = self.pending_monitor_updates.iter().all(|upd| !upd.blocked); + self.pending_monitor_updates.push(PendingChannelMonitorUpdate { + update, blocked: !release_monitor + }); + release_monitor + } + + /// Pushes a new monitor update into our monitor update queue, returning a reference to it if + /// it should be immediately given to the user for persisting or `None` if it should be held as + /// blocked. 
+ fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) + -> Option<&ChannelMonitorUpdate> { + let release_monitor = self.push_blockable_mon_update(update); + if release_monitor { self.pending_monitor_updates.last().map(|upd| &upd.update) } else { None } + } + + pub fn no_monitor_updates_pending(&self) -> bool { + self.pending_monitor_updates.is_empty() + } + + pub fn complete_all_mon_updates_through(&mut self, update_id: u64) { + self.pending_monitor_updates.retain(|upd| { + if upd.update.update_id <= update_id { + assert!(!upd.blocked, "Completed update must have flown"); + false + } else { true } + }); + } + + pub fn complete_one_mon_update(&mut self, update_id: u64) { + self.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id); + } + + /// Returns an iterator over all unblocked monitor updates which have not yet completed. + pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator { + self.pending_monitor_updates.iter() + .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) }) } /// Returns true if funding_created was sent/received. @@ -5313,7 +5597,7 @@ impl Channel { htlc_minimum_msat: self.holder_htlc_minimum_msat, feerate_per_kw: self.feerate_per_kw as u32, to_self_delay: self.get_holder_selected_contest_delay(), - max_accepted_htlcs: OUR_MAX_HTLCS, + max_accepted_htlcs: self.holder_max_accepted_htlcs, funding_pubkey: keys.funding_pubkey, revocation_basepoint: keys.revocation_basepoint, payment_point: keys.payment_point, @@ -5321,7 +5605,7 @@ impl Channel { htlc_basepoint: keys.htlc_basepoint, first_per_commitment_point, channel_flags: if self.config.announced_channel {1} else {0}, - shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey { + shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), }), @@ -5380,14 +5664,14 @@ impl Channel { htlc_minimum_msat: self.holder_htlc_minimum_msat, minimum_depth: self.minimum_depth.unwrap(), to_self_delay: self.get_holder_selected_contest_delay(), - max_accepted_htlcs: OUR_MAX_HTLCS, + max_accepted_htlcs: self.holder_max_accepted_htlcs, funding_pubkey: keys.funding_pubkey, revocation_basepoint: keys.revocation_basepoint, payment_point: keys.payment_point, delayed_payment_basepoint: keys.delayed_payment_basepoint, htlc_basepoint: keys.htlc_basepoint, first_per_commitment_point, - shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey { + shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey { Some(script) => script.clone().into_inner(), None => Builder::new().into_script(), }), @@ -5638,7 +5922,7 @@ impl Channel { /// May panic if called on a channel that wasn't immediately-previously /// self.remove_uncommitted_htlcs_and_mark_paused()'d - pub fn get_channel_reestablish(&self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { + pub fn get_channel_reestablish(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger { assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32); assert_ne!(self.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER); // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming @@ -5649,20 +5933,15 @@ impl Channel { // valid, and valid in fuzzing mode's arbitrary validity criteria: let mut pk = [2; 33]; pk[1] = 0xff; let dummy_pubkey = PublicKey::from_slice(&pk).unwrap(); - let 
data_loss_protect = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { + let remote_last_secret = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER { let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap(); log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id())); - OptionalField::Present(DataLossProtect { - your_last_per_commitment_secret: remote_last_secret, - my_current_per_commitment_point: dummy_pubkey - }) + remote_last_secret } else { log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id())); - OptionalField::Present(DataLossProtect { - your_last_per_commitment_secret: [0;32], - my_current_per_commitment_point: dummy_pubkey, - }) + [0;32] }; + self.mark_awaiting_response(); msgs::ChannelReestablish { channel_id: self.channel_id(), // The protocol has two different commitment number concepts - the "commitment @@ -5683,7 +5962,12 @@ impl Channel { // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't // overflow here. next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number - 1, - data_loss_protect, + your_last_per_commitment_secret: remote_last_secret, + my_current_per_commitment_point: dummy_pubkey, + // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction + // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the + // txid of that interactive transaction, else we MUST NOT set it. 
+ next_funding_txid: None, } } @@ -5739,8 +6023,15 @@ impl Channel { return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned())); } - if amount_msat < self.counterparty_htlc_minimum_msat { - return Err(ChannelError::Ignore(format!("Cannot send less than their minimum HTLC value ({})", self.counterparty_htlc_minimum_msat))); + let available_balances = self.get_available_balances(); + if amount_msat < available_balances.next_outbound_htlc_minimum_msat { + return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat", + available_balances.next_outbound_htlc_minimum_msat))); + } + + if amount_msat > available_balances.next_outbound_htlc_limit_msat { + return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat", + available_balances.next_outbound_htlc_limit_msat))); } if (self.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 { @@ -5753,75 +6044,13 @@ impl Channel { return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned())); } - let inbound_stats = self.get_inbound_pending_htlc_stats(None); - let outbound_stats = self.get_outbound_pending_htlc_stats(None); - if outbound_stats.pending_htlcs + 1 > self.counterparty_max_accepted_htlcs as u32 { - return Err(ChannelError::Ignore(format!("Cannot push more than their max accepted HTLCs ({})", self.counterparty_max_accepted_htlcs))); - } - // Check their_max_htlc_value_in_flight_msat - if outbound_stats.pending_htlcs_value_msat + amount_msat > self.counterparty_max_htlc_value_in_flight_msat { - return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat))); - } + let need_holding_cell = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0; + log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat, + if force_holding_cell { "into holding cell" } + else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" } + else { "to peer" }); - let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number); - let commitment_stats = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, true, logger); - if !self.is_outbound() { - // Check that we won't violate the remote channel reserve by adding this HTLC. 
- let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered); - let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None); - let holder_selected_chan_reserve_msat = self.holder_selected_channel_reserve_satoshis * 1000; - if commitment_stats.remote_balance_msat < counterparty_commit_tx_fee_msat + holder_selected_chan_reserve_msat { - return Err(ChannelError::Ignore("Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_owned())); - } - } - - let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.opt_anchors() { - (0, 0) - } else { - let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64; - (dust_buffer_feerate * htlc_success_tx_weight(false) / 1000, - dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000) - }; - let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.counterparty_dust_limit_satoshis; - if amount_msat / 1000 < exposure_dust_limit_success_sats { - let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + amount_msat; - if on_counterparty_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { - return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", - on_counterparty_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()))); - } - } - - let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis; - if amount_msat / 1000 < exposure_dust_limit_timeout_sats { - let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + amount_msat; - if on_holder_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() { - return Err(ChannelError::Ignore(format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", - on_holder_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat()))); - } - } - - let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat; - if holder_balance_msat < amount_msat { - return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self {}", amount_msat, holder_balance_msat))); - } - - // `2 *` and extra HTLC are for the fee spike buffer. - let commit_tx_fee_msat = if self.is_outbound() { - let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered); - FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(())) - } else { 0 }; - if holder_balance_msat - amount_msat < commit_tx_fee_msat { - return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. 
local_commit_tx_fee {}", holder_balance_msat, commit_tx_fee_msat))); - } - - // Check self.counterparty_selected_channel_reserve_satoshis (the amount we must keep as - // reserve for the remote to have something to claim if we misbehave) - let chan_reserve_msat = self.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000; - if holder_balance_msat - amount_msat - commit_tx_fee_msat < chan_reserve_msat { - return Err(ChannelError::Ignore(format!("Cannot send value that would put our balance under counterparty-announced channel reserve value ({})", chan_reserve_msat))); - } - - if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 { + if need_holding_cell { force_holding_cell = true; } @@ -5998,8 +6227,7 @@ impl Channel { Some(_) => { let monitor_update = self.build_commitment_no_status_check(logger); self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); - self.pending_monitor_updates.push(monitor_update); - Ok(Some(self.pending_monitor_updates.last().unwrap())) + Ok(self.push_ret_blockable_mon_update(monitor_update)) }, None => Ok(None) } @@ -6029,7 +6257,7 @@ impl Channel { /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no /// [`ChannelMonitorUpdate`] will be returned). pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures, - target_feerate_sats_per_kw: Option) + target_feerate_sats_per_kw: Option, override_shutdown_script: Option) -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError> where SP::Target: SignerProvider { for htlc in self.pending_outbound_htlcs.iter() { @@ -6045,6 +6273,9 @@ impl Channel { return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()}); } } + if self.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() { + return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()}); + } assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0); if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 { return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()}); @@ -6060,7 +6291,17 @@ impl Channel { let update_shutdown_script = match self.shutdown_scriptpubkey { Some(_) => false, None if !chan_closed => { - let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey(); + // use override shutdown script if provided + let shutdown_scriptpubkey = match override_shutdown_script { + Some(script) => script, + None => { + // otherwise, use the shutdown scriptpubkey provided by the signer + match signer_provider.get_shutdown_scriptpubkey() { + Ok(scriptpubkey) => scriptpubkey, + Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}), + } + }, + }; if !shutdown_scriptpubkey.is_compatible(their_features) { return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() }); } @@ -6088,8 +6329,9 @@ impl Channel { }], }; self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); - self.pending_monitor_updates.push(monitor_update); - Some(self.pending_monitor_updates.last().unwrap()) + if self.push_blockable_mon_update(monitor_update) { + 
@@ -6088,8 +6329,9 @@ impl Channel {
 			}],
 		};
 		self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-			self.pending_monitor_updates.push(monitor_update);
-			Some(self.pending_monitor_updates.last().unwrap())
+			if self.push_blockable_mon_update(monitor_update) {
+				self.pending_monitor_updates.last().map(|upd| &upd.update)
+			} else { None }
 		} else { None };
 		let shutdown = msgs::Shutdown {
 			channel_id: self.channel_id,
@@ -6121,7 +6363,7 @@ impl Channel {
 	/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
 	/// Also returns the list of payment_hashes for channels which we can safely fail backwards
 	/// immediately (others we will have to allow to time out).
-	pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
+	pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
 		// Note that we MUST only generate a monitor update that indicates force-closure - we're
 		// called during initialization prior to the chain_monitor in the encompassing ChannelManager
 		// being fully configured in some cases. Thus, its likely any monitor events we generate will
@@ -6150,7 +6392,7 @@ impl Channel {
 		// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
 		if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
 			self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
-			Some((funding_txo, ChannelMonitorUpdate {
+			Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
 				update_id: self.latest_monitor_update_id,
 				updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
 			}))
@@ -6191,8 +6433,8 @@ impl Writeable for ChannelUpdateStatus {
 		// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
 		match self {
 			ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
-			ChannelUpdateStatus::DisabledStaged => 0u8.write(writer)?,
-			ChannelUpdateStatus::EnabledStaged => 1u8.write(writer)?,
+			ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
+			ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
 			ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
 		}
 		Ok(())
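Although the staged variants now carry a tick counter, serialization still collapses them: `DisabledStaged(_)` is written as 0 (last announced as enabled) and `EnabledStaged(_)` as 1, so a restarted node simply restarts the staging countdown. A standalone sketch of that collapse, using a stand-in enum rather than the crate's types:

    // Stand-in illustration of collapsing staged states on (de)serialization.
    #[derive(Debug, PartialEq)]
    enum Status {
        Enabled,
        DisabledStaged(u8),
        EnabledStaged(u8),
        Disabled,
    }

    fn to_byte(s: &Status) -> u8 {
        match s {
            // A staged transition serializes as the state we last announced.
            Status::Enabled | Status::DisabledStaged(_) => 0,
            Status::Disabled | Status::EnabledStaged(_) => 1,
        }
    }

    fn from_byte(b: u8) -> Status {
        if b == 0 { Status::Enabled } else { Status::Disabled }
    }

    fn main() {
        assert_eq!(from_byte(to_byte(&Status::DisabledStaged(3))), Status::Enabled);
        assert_eq!(from_byte(to_byte(&Status::EnabledStaged(1))), Status::Disabled);
    }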
@@ -6496,6 +6738,8 @@ impl Writeable for Channel {
 		// we write the high bytes as an option here.
 		let user_id_high_opt = Some((self.user_id >> 64) as u64);

+		let holder_max_accepted_htlcs = if self.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.holder_max_accepted_htlcs) };
+
 		write_tlv_fields!(writer, {
 			(0, self.announcement_sigs, option),
 			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -6521,8 +6765,10 @@ impl Writeable for Channel {
 			(23, channel_ready_event_emitted, option),
 			(25, user_id_high_opt, option),
 			(27, self.channel_keys_id, required),
+			(28, holder_max_accepted_htlcs, option),
 			(29, self.temporary_channel_id, option),
 			(31, channel_pending_event_emitted, option),
+			(33, self.pending_monitor_updates, vec_type),
 		});

 		Ok(())
@@ -6589,7 +6835,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		let value_to_self_msat = Readable::read(reader)?;

 		let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
-		let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
+
+		let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
 		for _ in 0..pending_inbound_htlc_count {
 			pending_inbound_htlcs.push(InboundHTLCOutput {
 				htlc_id: Readable::read(reader)?,
@@ -6607,7 +6854,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		}

 		let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
-		let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
+		let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
 		for _ in 0..pending_outbound_htlc_count {
 			pending_outbound_htlcs.push(OutboundHTLCOutput {
 				htlc_id: Readable::read(reader)?,
@@ -6636,7 +6883,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		}

 		let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
-		let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, OUR_MAX_HTLCS as usize*2));
+		let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
 		for _ in 0..holding_cell_htlc_update_count {
 			holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
 				0 => HTLCUpdateAwaitingACK::AddHTLC {
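The `Vec::with_capacity(cmp::min(count, DEFAULT_MAX_HTLCS ...))` pattern above is a defensive bound: the count comes from serialized data, so only the preallocation is clamped while the loop still reads exactly `count` items (and fails naturally if the stream is short). A generic sketch of the same pattern, with a bound of 50 assumed to mirror `DEFAULT_MAX_HTLCS`:

    use std::cmp;

    // Bound is an assumption standing in for DEFAULT_MAX_HTLCS; any small constant works.
    const PREALLOC_BOUND: usize = 50;

    fn read_bounded<T, E>(count: u64, mut read_one: impl FnMut() -> Result<T, E>) -> Result<Vec<T>, E> {
        // Never trust `count` for allocation sizing; it only bounds the preallocation.
        let mut items = Vec::with_capacity(cmp::min(count as usize, PREALLOC_BOUND));
        for _ in 0..count {
            items.push(read_one()?);
        }
        Ok(items)
    }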
@@ -6669,13 +6916,13 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		let monitor_pending_commitment_signed = Readable::read(reader)?;

 		let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
-		let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize));
+		let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
 		for _ in 0..monitor_pending_forwards_count {
 			monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
 		}

 		let monitor_pending_failures_count: u64 = Readable::read(reader)?;
-		let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, OUR_MAX_HTLCS as usize));
+		let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
 		for _ in 0..monitor_pending_failures_count {
 			monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
 		}
@@ -6796,6 +7043,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		let mut user_id_high_opt: Option<u64> = None;
 		let mut channel_keys_id: Option<[u8; 32]> = None;
 		let mut temporary_channel_id: Option<[u8; 32]> = None;
+		let mut holder_max_accepted_htlcs: Option<u16> = None;
+
+		let mut pending_monitor_updates = Some(Vec::new());

 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
@@ -6816,8 +7066,10 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(23, channel_ready_event_emitted, option),
 			(25, user_id_high_opt, option),
 			(27, channel_keys_id, option),
+			(28, holder_max_accepted_htlcs, option),
 			(29, temporary_channel_id, option),
 			(31, channel_pending_event_emitted, option),
+			(33, pending_monitor_updates, vec_type),
 		});

 		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
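Both new TLV entries keep older serializations readable by the new code: `holder_max_accepted_htlcs` (type 28) is only written when it differs from the default and falls back to `DEFAULT_MAX_HTLCS` when absent, and `pending_monitor_updates` (type 33) defaults to an empty vec. A small sketch of that default-on-absence step (the value 50 is an assumption standing in for the crate's `DEFAULT_MAX_HTLCS`):

    // Defaults applied after the TLV read, for data written before these fields existed.
    const DEFAULT_MAX_HTLCS: u16 = 50; // assumed value of the crate's constant

    fn apply_read_defaults<U>(
        holder_max_accepted_htlcs: Option<u16>,
        pending_monitor_updates: Option<Vec<U>>,
    ) -> (u16, Vec<U>) {
        (
            holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS),
            pending_monitor_updates.unwrap_or_default(),
        )
    }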
@@ -6870,6 +7122,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		// separate u64 values.
 		let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);

+		let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
+
 		Ok(Channel {
 			user_id,

@@ -6898,6 +7152,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			cur_counterparty_commitment_transaction_number,
 			value_to_self_msat,

+			holder_max_accepted_htlcs,
 			pending_inbound_htlcs,
 			pending_outbound_htlcs,
 			holding_cell_htlc_updates,
@@ -6970,6 +7225,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

 			workaround_lnd_bug_4006: None,
+			sent_message_awaiting_response: None,

 			latest_inbound_scid_alias,
 			// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
@@ -6984,7 +7240,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			channel_type: channel_type.unwrap(),
 			channel_keys_id,

-			pending_monitor_updates: Vec::new(),
+			pending_monitor_updates: pending_monitor_updates.unwrap(),
 		})
 	}
 }
@@ -7005,14 +7261,15 @@ mod tests {
 	use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
 	use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
 	use crate::ln::features::ChannelTypeFeatures;
-	use crate::ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
+	use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
 	use crate::ln::script::ShutdownScript;
 	use crate::ln::chan_utils;
 	use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
 	use crate::chain::BestBlock;
 	use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
-	use crate::chain::keysinterface::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
+	use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
 	use crate::chain::transaction::OutPoint;
+	use crate::routing::router::Path;
 	use crate::util::config::UserConfig;
 	use crate::util::enforcing_trait_impls::EnforcingSigner;
 	use crate::util::errors::APIError;
@@ -7075,17 +7332,17 @@ mod tests {
 		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }

-		fn get_destination_script(&self) -> Script {
+		fn get_destination_script(&self) -> Result<Script, ()> {
 			let secp_ctx = Secp256k1::signing_only();
 			let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
 			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
-			Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script()
+			Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
 		}

-		fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+		fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
 			let secp_ctx = Secp256k1::signing_only();
 			let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
-			ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key))
+			Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
 		}
 	}
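With `get_destination_script` and `get_shutdown_scriptpubkey` now returning `Result`, callers map failures into API-level errors rather than unwrapping, as `get_shutdown` does above. A sketch of that calling pattern against a hypothetical stand-in trait (not the crate's `SignerProvider` itself):

    use lightning::ln::script::ShutdownScript;
    use lightning::util::errors::APIError;

    // Hypothetical stand-in for the fallible SignerProvider method.
    trait ShutdownScriptSource {
        fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()>;
    }

    fn shutdown_script_or_api_err<S: ShutdownScriptSource>(source: &S) -> Result<ShutdownScript, APIError> {
        source.get_shutdown_scriptpubkey().map_err(|_| APIError::ChannelUnavailable {
            err: "Failed to get shutdown scriptpubkey".to_owned(),
        })
    }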
@@ -7190,7 +7447,7 @@ mod tests {
 			cltv_expiry: 200000000,
 			state: OutboundHTLCState::Committed,
 			source: HTLCSource::OutboundRoute {
-				path: Vec::new(),
+				path: Path { hops: Vec::new(), blinded_tail: None },
 				session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
 				first_hop_htlc_msat: 548,
 				payment_id: PaymentId([42; 32]),
@@ -7305,12 +7562,7 @@ mod tests {
 		let msg = node_b_chan.get_channel_reestablish(&&logger);
 		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
 		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
-		match msg.data_loss_protect {
-			OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => {
-				assert_eq!(your_last_per_commitment_secret, [0; 32]);
-			},
-			_ => panic!()
-		}
+		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

 		// Check that the commitment point in Node A's channel_reestablish message
 		// is sane.
@@ -7318,12 +7570,7 @@ mod tests {
 		let msg = node_a_chan.get_channel_reestablish(&&logger);
 		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
 		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
-		match msg.data_loss_protect {
-			OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => {
-				assert_eq!(your_last_per_commitment_secret, [0; 32]);
-			},
-			_ => panic!()
-		}
+		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
 	}

 	#[test]
@@ -7505,7 +7752,7 @@ mod tests {
 		}
 	}

-	#[cfg(not(feature = "grind_signatures"))]
+	#[cfg(feature = "_test_vectors")]
 	#[test]
 	fn outbound_commitment_test() {
 		use bitcoin::util::sighash;
@@ -7514,7 +7761,7 @@ mod tests {
 		use bitcoin::hashes::hex::FromHex;
 		use bitcoin::hash_types::Txid;
 		use bitcoin::secp256k1::Message;
-		use crate::chain::keysinterface::EcdsaChannelSigner;
+		use crate::sign::EcdsaChannelSigner;
 		use crate::ln::PaymentPreimage;
 		use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
 		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
@@ -7538,6 +7785,7 @@ mod tests {
 			[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
 			10_000_000,
 			[0; 32],
+			[0; 32],
 		);

 		assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],