X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=8232c5e1b1611e22b2a91f467f5a11e35a526215;hb=67868aec721b38c993ce0e48ecc46549092963f6;hp=a721d79b203b206186ffce490f75b0b90460ca4a;hpb=1d9a70952958bc02f426f1a04a1cfcc4bc6b0cff;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index a721d79b..8232c5e1 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -27,7 +27,7 @@ use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::DecodeError;
 use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
@@ -41,7 +41,7 @@ use crate::routing::gossip::NodeId;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
 use crate::util::logger::Logger;
 use crate::util::errors::APIError;
-use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits};
+use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
 use crate::util::scid_utils::scid_from_parts;
 
 use crate::io;
@@ -224,6 +224,7 @@ struct OutboundHTLCOutput {
 	payment_hash: PaymentHash,
 	state: OutboundHTLCState,
 	source: HTLCSource,
+	skimmed_fee_msat: Option<u64>,
 }
 
 /// See AwaitingRemoteRevoke ChannelState for more info
@@ -235,6 +236,8 @@ enum HTLCUpdateAwaitingACK {
 		payment_hash: PaymentHash,
 		source: HTLCSource,
 		onion_routing_packet: msgs::OnionPacket,
+		// The extra fee we're skimming off the top of this HTLC.
+		skimmed_fee_msat: Option<u64>,
 	},
 	ClaimHTLC {
 		payment_preimage: PaymentPreimage,
@@ -485,13 +488,13 @@ enum UpdateFulfillFetch {
 }
 
 /// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch<'a> {
+pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
 	NewClaim {
 		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
-		monitor_update: &'a ChannelMonitorUpdate,
+		monitor_update: ChannelMonitorUpdate,
 		/// The value of the HTLC which was claimed, in msat.
 		htlc_value_msat: u64,
 	},
@@ -524,6 +527,10 @@ pub(super) struct ReestablishResponses {
 }
 
 /// The return type of `force_shutdown`
+///
+/// Contains a (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
+/// followed by a list of HTLCs to fail back in the form of the (source, payment hash, and this
+/// channel's counterparty_node_id and channel_id).
 pub(crate) type ShutdownResult = (
 	Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
 	Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>
@@ -585,17 +592,10 @@ pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
 
 struct PendingChannelMonitorUpdate {
 	update: ChannelMonitorUpdate,
-	/// In some cases we need to delay letting the [`ChannelMonitorUpdate`] go until after an
-	/// `Event` is processed by the user. This bool indicates the [`ChannelMonitorUpdate`] is
-	/// blocked on some external event and the [`ChannelManager`] will update us when we're ready.
-	///
-	/// [`ChannelManager`]: super::channelmanager::ChannelManager
-	blocked: bool,
 }
 
 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
 	(0, update, required),
-	(2, blocked, required),
 });
 
 /// Contains everything about the channel including state, and various flags.
@@ -866,11 +866,9 @@ pub(super) struct ChannelContext<Signer: ChannelSigner> {
 	/// [`SignerProvider::derive_channel_signer`].
 	channel_keys_id: [u8; 32],
 
-	/// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
-	/// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
-	/// completes we still need to be able to complete the persistence. Thus, we have to keep a
-	/// copy of the [`ChannelMonitorUpdate`] here until it is complete.
-	pending_monitor_updates: Vec<PendingChannelMonitorUpdate>,
+	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
+	/// store it here and only release it to the `ChannelManager` once it asks for it.
+	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
 }
 
 impl<Signer: ChannelSigner> ChannelContext<Signer> {
@@ -909,6 +907,34 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
 		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
 	}
 
+	/// shutdown state returns the state of the channel in its various stages of shutdown
+	pub fn shutdown_state(&self) -> ChannelShutdownState {
+		if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
+			return ChannelShutdownState::ShutdownComplete;
+		}
+		if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
+			return ChannelShutdownState::ShutdownInitiated;
+		}
+		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
+			return ChannelShutdownState::ResolvingHTLCs;
+		}
+		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
+			return ChannelShutdownState::NegotiatingClosingFee;
+		}
+		return ChannelShutdownState::NotShuttingDown;
+	}
+
+	fn closing_negotiation_ready(&self) -> bool {
+		self.pending_inbound_htlcs.is_empty() &&
+		self.pending_outbound_htlcs.is_empty() &&
+		self.pending_update_fee.is_none() &&
+		self.channel_state &
+		(BOTH_SIDES_SHUTDOWN_MASK |
+			ChannelState::AwaitingRemoteRevoke as u32 |
+			ChannelState::PeerDisconnected as u32 |
+			ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
+	}
+
 	/// Returns true if this channel is currently available for use. This is a superset of
 	/// is_usable() and considers things like the channel being temporarily disabled.
 	/// Allowed in any state (including after shutdown)
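
Aside: the `shutdown_state()` helper added above collapses the `channel_state` bit flags into the `ChannelShutdownState` enum this patch starts importing from `channelmanager` (see the import hunk at the top of the diff). A minimal sketch of how calling code might consume it, assuming only the five variants visible in this hunk and that the enum is importable from `lightning::ln::channelmanager`:

```rust
use lightning::ln::channelmanager::ChannelShutdownState;

// Hypothetical helper: may new HTLCs still be added over this channel?
// Once either side has sent `shutdown`, the answer is no.
fn accepts_new_htlcs(state: ChannelShutdownState) -> bool {
	match state {
		ChannelShutdownState::NotShuttingDown => true,
		ChannelShutdownState::ShutdownInitiated
			| ChannelShutdownState::ResolvingHTLCs
			| ChannelShutdownState::NegotiatingClosingFee
			| ChannelShutdownState::ShutdownComplete => false,
	}
}
```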
@@ -1065,8 +1091,18 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
 		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
 	}
 
-	pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
-		self.config.options.max_dust_htlc_exposure_msat
+	pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
+		fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
+	where F::Target: FeeEstimator
+	{
+		match self.config.options.max_dust_htlc_exposure {
+			MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
+				let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
+					ConfirmationTarget::HighPriority);
+				feerate_per_kw as u64 * multiplier
+			},
+			MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
+		}
 	}
 
 	/// Returns the previous [`ChannelConfig`] applied to this channel, if any.
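
Aside: the `FeeRateMultiplier` arm above multiplies a feerate quoted in sats per 1000 weight units by the configured multiplier and uses the product directly as an msat limit. The same arithmetic in isolation, with the LDK trait plumbing elided and illustrative numbers in the comments:

```rust
// Local stand-in for lightning::util::config::MaxDustHTLCExposure.
enum MaxDustHTLCExposure {
	FixedLimitMsat(u64),
	FeeRateMultiplier(u64),
}

// With a high-priority feerate of 2_500 sat/kW and a multiplier of 5_000,
// the allowed dust exposure works out to 12_500_000 msat.
fn max_dust_exposure_msat(policy: &MaxDustHTLCExposure, high_priority_feerate_per_kw: u32) -> u64 {
	match policy {
		MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => high_priority_feerate_per_kw as u64 * multiplier,
		MaxDustHTLCExposure::FixedLimitMsat(limit) => *limit,
	}
}
```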
@@ -1539,7 +1575,10 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
 	/// Doesn't bother handling the
 	/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
 	/// corner case properly.
-	pub fn get_available_balances(&self) -> AvailableBalances {
+	pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+	-> AvailableBalances
+	where F::Target: FeeEstimator
+	{
 		let context = &self;
 		// Note that we have to handle overflow due to the above case.
 		let inbound_stats = context.get_inbound_pending_htlc_stats(None);
@@ -1621,6 +1660,7 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
 		// send above the dust limit (as the router can always overpay to meet the dust limit).
 		let mut remaining_msat_below_dust_exposure_limit = None;
 		let mut dust_exposure_dust_limit_msat = 0;
+		let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
 
 		let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 			(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
@@ -1630,17 +1670,17 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
 				context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
 		};
 		let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-		if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+		if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
 			remaining_msat_below_dust_exposure_limit =
-				Some(context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+				Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
 			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
 		}
 
 		let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
-		if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > context.get_max_dust_htlc_exposure_msat() as i64 {
+		if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
 			remaining_msat_below_dust_exposure_limit = Some(cmp::min(
 				remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
-				context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+				max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
 			dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
 		}
 
@@ -2256,7 +2296,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	}
 
 	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
-		let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
 		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
 			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
 				// Even if we aren't supposed to let new monitor updates with commitment state
@@ -2264,43 +2304,30 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				// matter what. Sadly, to push a new monitor update which flies before others
 				// already queued, we have to insert it into the pending queue and update the
 				// update_ids of all the following monitors.
-				let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+				if release_cs_monitor && msg.is_some() {
 					let mut additional_update = self.build_commitment_no_status_check(logger);
 					// build_commitment_no_status_check may bump latest_monitor_id but we want them
 					// to be strictly increasing by one, so decrement it here.
 					self.context.latest_monitor_update_id = monitor_update.update_id;
 					monitor_update.updates.append(&mut additional_update.updates);
-					self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-						update: monitor_update, blocked: false,
-					});
-					self.context.pending_monitor_updates.len() - 1
 				} else {
-					let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
-						.unwrap_or(self.context.pending_monitor_updates.len());
-					let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
+					let new_mon_id = self.context.blocked_monitor_updates.get(0)
 						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
 					monitor_update.update_id = new_mon_id;
-					self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
-						update: monitor_update, blocked: false,
-					});
-					for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+					for held_update in self.context.blocked_monitor_updates.iter_mut() {
 						held_update.update.update_id += 1;
 					}
 					if msg.is_some() {
 						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
 						let update = self.build_commitment_no_status_check(logger);
-						self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-							update, blocked: true,
+						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+							update,
 						});
 					}
-					insert_pos
-				};
-				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
-				UpdateFulfillCommitFetch::NewClaim {
-					monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
-						.expect("We just pushed the monitor update").update,
-					htlc_value_msat,
 				}
+
+				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
 			},
 			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
 		}
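
Aside: the rewritten `else` branch above lets a preimage-bearing update jump the blocked queue by taking over the first blocked update's `update_id` and shifting every blocked ID up by one, so the sequence handed to the `ChannelMonitor` stays strictly increasing. A toy model of just that renumbering, in plain Rust with IDs standing in for updates:

```rust
// `blocked` holds the IDs of queued-but-unreleased monitor updates; `new_id`
// is the ID the fresh update was originally assigned. The fresh update is
// released under the first blocked ID, and the queue shifts up by one.
fn renumber_for_early_release(blocked: &mut Vec<u64>, new_id: u64) -> u64 {
	let released_id = blocked.first().copied().unwrap_or(new_id);
	for id in blocked.iter_mut() { *id += 1; }
	released_id
}

#[test]
fn ids_remain_strictly_increasing() {
	let mut blocked = vec![7, 8, 9];
	assert_eq!(renumber_for_early_release(&mut blocked, 10), 7);
	assert_eq!(blocked, vec![8, 9, 10]); // released: 7; still queued: 8, 9, 10
}
```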
@@ -2571,8 +2598,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
 	}
 
-	pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
-	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
+	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
+		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
+		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
+	) -> Result<(), ChannelError>
+	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
+		FE::Target: FeeEstimator, L::Target: Logger,
+	{
 		// We can't accept HTLCs sent after we've sent a shutdown.
 		let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
 		if local_sent_shutdown {
@@ -2625,6 +2657,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			}
 		}
 
+		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
 		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
 			(0, 0)
 		} else {
@@ -2635,9 +2668,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
 		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
 			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-			if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
 				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-					on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
 				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
 			}
 		}
@@ -2645,9 +2678,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
 		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
 			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-			if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
 				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-					on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
 				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
 			}
 		}
@@ -2790,7 +2823,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		Ok(())
 	}
 
-	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
+	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
 		where L::Target: Logger
 	{
 		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
@@ -3014,16 +3047,24 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
 	/// Public version of the below, checking relevant preconditions first.
 	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
 	/// returns `(None, Vec::new())`.
-	pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+	pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
+		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+	where F::Target: FeeEstimator, L::Target: Logger
+	{
 		if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
 		   (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-			self.free_holding_cell_htlcs(logger)
+			self.free_holding_cell_htlcs(fee_estimator, logger)
 		} else { (None, Vec::new()) }
 	}
 
 	/// Frees any pending commitment updates in the holding cell, generating the relevant messages
 	/// for our counterparty.
-	fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+	fn free_holding_cell_htlcs<F: Deref, L: Deref>(
+		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
+	where F::Target: FeeEstimator, L::Target: Logger
+	{
 		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
 		if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
 			log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
@@ -3047,8 +3088,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				// handling this case better and maybe fulfilling some of the HTLCs while attempting
 				// to rebalance channels.
 				match &htlc_update {
-					&HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
-						match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
+					&HTLCUpdateAwaitingACK::AddHTLC {
+						amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+						skimmed_fee_msat, ..
+					} => {
+						match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
+							onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
+						{
 							Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
 							Err(e) => {
 								match e {
@@ -3107,7 +3153,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				return (None, htlcs_to_fail);
 			}
 			let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
-				self.send_update_fee(feerate, false, logger)
+				self.send_update_fee(feerate, false, fee_estimator, logger)
 			} else {
 				None
 			};
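
Aside: as the match above shows, draining the holding cell re-runs each queued `AddHTLC` through `send_htlc`, and an `Ignore`-class failure moves the HTLC onto the fail-back list rather than aborting the drain. A toy model of that control flow (IDs stand in for real HTLCs, and a single balance check stands in for the full set of send-time checks):

```rust
struct QueuedAdd { htlc_id: u64, amount_msat: u64 }

// Returns (update_add messages to send, HTLCs to fail back to their source).
fn drain_holding_cell(cell: Vec<QueuedAdd>, sendable_msat: u64) -> (Vec<u64>, Vec<u64>) {
	let (mut update_add_msgs, mut htlcs_to_fail) = (Vec::new(), Vec::new());
	for add in cell {
		if add.amount_msat <= sendable_msat {
			update_add_msgs.push(add.htlc_id); // would re-send this HTLC
		} else {
			htlcs_to_fail.push(add.htlc_id); // non-fatal: fail it backwards
		}
	}
	(update_add_msgs, htlcs_to_fail)
}
```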
@@ -3134,8 +3180,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
 	/// generating an appropriate error *after* the channel state has been updated based on the
 	/// revoke_and_ack message.
-	pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
-		where L::Target: Logger,
+	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
+	where F::Target: FeeEstimator, L::Target: Logger,
 	{
 		if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
 			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
@@ -3335,9 +3383,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
 		}
 
-		match self.free_holding_cell_htlcs(logger) {
-			(Some(_), htlcs_to_fail) => {
-				let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
+		match self.free_holding_cell_htlcs(fee_estimator, logger) {
+			(Some(mut additional_update), htlcs_to_fail) => {
 				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
 				// strictly increasing by one, so decrement it here.
 				self.context.latest_monitor_update_id = monitor_update.update_id;
@@ -3371,8 +3418,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// Queues up an outbound update fee by placing it in the holding cell. You should call
 	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
 	/// commitment update.
-	pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
-		let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
+	pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+	where F::Target: FeeEstimator, L::Target: Logger
+	{
+		let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
 		assert!(msg_opt.is_none(), "We forced holding cell?");
 	}
 
@@ -3383,7 +3433,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	///
 	/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
 	/// [`Channel`] if `force_holding_cell` is false.
-	fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
+	fn send_update_fee<F: Deref, L: Deref>(
+		&mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+	) -> Option<msgs::UpdateFee>
+	where F::Target: FeeEstimator, L::Target: Logger
+	{
 		if !self.context.is_outbound() {
 			panic!("Cannot send fee from inbound channel");
 		}
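
Aside: every signature in this region now threads a `LowerBoundedFeeEstimator<F>` down from the `ChannelManager` so the dust-exposure limit can track the live feerate. For orientation, the `FeeEstimator` trait a user supplies is tiny; a deliberately static sketch (a real node would query its fee source here):

```rust
use lightning::chain::chaininterface::{ConfirmationTarget, FeeEstimator};

// Returns the same quote for every confirmation target. LDK wraps the
// user-supplied estimator in a `LowerBoundedFeeEstimator`, which clamps
// quotes to the 253 sat/kW relay floor before channel code sees them.
struct StaticFeeEstimator(u32);

impl FeeEstimator for StaticFeeEstimator {
	fn get_est_sat_per_1000_weight(&self, _target: ConfirmationTarget) -> u32 {
		self.0
	}
}
```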
@@ -3410,11 +3465,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
 		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
 		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-		if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
 			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
 			return None;
 		}
-		if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
 			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
 			return None;
 		}
@@ -3553,12 +3609,6 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	{
 		assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
 		self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
-		let mut found_blocked = false;
-		self.context.pending_monitor_updates.retain(|upd| {
-			if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
-			if upd.blocked { found_blocked = true; }
-			upd.blocked
-		});
 
 		// If we're past (or at) the FundingSent stage on an outbound channel, try to
 		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
@@ -3651,11 +3701,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
 		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
 		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
-		if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
 			return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
 				msg.feerate_per_kw, holder_tx_dust_exposure)));
 		}
-		if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
 			return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
 				msg.feerate_per_kw, counterparty_tx_dust_exposure)));
 		}
@@ -3690,6 +3741,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 					payment_hash: htlc.payment_hash,
 					cltv_expiry: htlc.cltv_expiry,
 					onion_routing_packet: (**onion_packet).clone(),
+					skimmed_fee_msat: htlc.skimmed_fee_msat,
 				});
 			}
 		}
@@ -3976,12 +4028,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// this point if we're the funder we should send the initial closing_signed, and in any case
 	/// shutdown should complete within a reasonable timeframe.
 	fn closing_negotiation_ready(&self) -> bool {
-		self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
-			self.context.channel_state &
-			(BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
-				ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
-				== BOTH_SIDES_SHUTDOWN_MASK &&
-			self.context.pending_update_fee.is_none()
+		self.context.closing_negotiation_ready()
 	}
 
 	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
@@ -4061,7 +4108,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 
 	pub fn shutdown<SP: Deref>(
 		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
-	) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
 	where SP::Target: SignerProvider
 	{
 		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4127,9 +4174,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				}],
 			};
 			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-			if self.push_blockable_mon_update(monitor_update) {
-				self.context.pending_monitor_updates.last().map(|upd| &upd.update)
-			} else { None }
+			self.push_ret_blockable_mon_update(monitor_update)
 		} else { None };
 
 		let shutdown = if send_shutdown {
 			Some(msgs::Shutdown {
@@ -4419,64 +4464,37 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
 	}
 
-	pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
-		if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
-		self.context.pending_monitor_updates[0].update.update_id - 1
+	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
+	pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
+		if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
+		self.context.blocked_monitor_updates[0].update.update_id - 1
 	}
 
 	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
 	/// further blocked monitor update exists after the next.
-	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
-		for i in 0..self.context.pending_monitor_updates.len() {
-			if self.context.pending_monitor_updates[i].blocked {
-				self.context.pending_monitor_updates[i].blocked = false;
-				return Some((&self.context.pending_monitor_updates[i].update,
-					self.context.pending_monitor_updates.len() > i + 1));
-			}
-		}
-		None
+	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
		if self.context.blocked_monitor_updates.is_empty() { return None; }
		Some((self.context.blocked_monitor_updates.remove(0).update,
			!self.context.blocked_monitor_updates.is_empty()))
 	}
 
-	/// Pushes a new monitor update into our monitor update queue, returning whether it should be
-	/// immediately given to the user for persisting or if it should be held as blocked.
-	fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
-		let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
-		self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
-			update, blocked: !release_monitor
-		});
-		release_monitor
-	}
-
-	/// Pushes a new monitor update into our monitor update queue, returning a reference to it if
-	/// it should be immediately given to the user for persisting or `None` if it should be held as
-	/// blocked.
+	/// Pushes a new monitor update into our monitor update queue, returning it if it should be
+	/// immediately given to the user for persisting or `None` if it should be held as blocked.
 	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
-	-> Option<&ChannelMonitorUpdate> {
-		let release_monitor = self.push_blockable_mon_update(update);
-		if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
-	}
-
-	pub fn no_monitor_updates_pending(&self) -> bool {
-		self.context.pending_monitor_updates.is_empty()
-	}
-
-	pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
-		self.context.pending_monitor_updates.retain(|upd| {
-			if upd.update.update_id <= update_id {
-				assert!(!upd.blocked, "Completed update must have flown");
-				false
-			} else { true }
-		});
-	}
-
-	pub fn complete_one_mon_update(&mut self, update_id: u64) {
-		self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
+	-> Option<ChannelMonitorUpdate> {
+		let release_monitor = self.context.blocked_monitor_updates.is_empty();
+		if !release_monitor {
+			self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+				update,
+			});
+			None
+		} else {
+			Some(update)
+		}
 	}
 
-	/// Returns an iterator over all unblocked monitor updates which have not yet completed.
-	pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
-		self.context.pending_monitor_updates.iter()
-			.filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
+	pub fn blocked_monitor_updates_pending(&self) -> usize {
+		self.context.blocked_monitor_updates.len()
 	}
 
 	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
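
Aside: the two functions above, together with `unblock_next_blocked_monitor_update`, define the entire queue discipline that replaces the old per-update `blocked` flag: a new update is handed back for immediate persistence only when nothing is queued ahead of it, and blocked updates are later drained strictly first-in-first-out. A standalone model of that discipline:

```rust
struct MonUpdateQueue { blocked: Vec<u64> }

impl MonUpdateQueue {
	// Mirrors push_ret_blockable_mon_update: release iff nothing is blocked.
	fn push(&mut self, update: u64) -> Option<u64> {
		if self.blocked.is_empty() { Some(update) } else { self.blocked.push(update); None }
	}

	// Mirrors unblock_next_blocked_monitor_update: FIFO release, plus a flag
	// telling the caller whether further blocked updates remain.
	fn unblock_next(&mut self) -> Option<(u64, bool)> {
		if self.blocked.is_empty() { return None; }
		Some((self.blocked.remove(0), !self.blocked.is_empty()))
	}
}
```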
@@ -5037,11 +5055,16 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// commitment update.
 	///
 	/// `Err`s will only be [`ChannelError::Ignore`].
-	pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
-		onion_routing_packet: msgs::OnionPacket, logger: &L)
-	-> Result<(), ChannelError> where L::Target: Logger {
+	pub fn queue_add_htlc<F: Deref, L: Deref>(
+		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+		onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+	) -> Result<(), ChannelError>
+	where F::Target: FeeEstimator, L::Target: Logger
+	{
 		self
-			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
+			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
+				skimmed_fee_msat, fee_estimator, logger)
 			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
 			.map_err(|err| {
 				if let ChannelError::Ignore(_) = err { /* fine */ }
@@ -5066,9 +5089,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// on this [`Channel`] if `force_holding_cell` is false.
 	///
 	/// `Err`s will only be [`ChannelError::Ignore`].
-	fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
-		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
-	-> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
+	fn send_htlc<F: Deref, L: Deref>(
+		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
+		skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
+	where F::Target: FeeEstimator, L::Target: Logger
+	{
 		if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
 			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
 		}
@@ -5081,7 +5108,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
 		}
 
-		let available_balances = self.context.get_available_balances();
+		let available_balances = self.context.get_available_balances(fee_estimator);
 		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
 			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
 				available_balances.next_outbound_htlc_minimum_msat)));
@@ -5120,6 +5147,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				cltv_expiry,
 				source,
 				onion_routing_packet,
+				skimmed_fee_msat,
 			});
 			return Ok(None);
 		}
@@ -5131,6 +5159,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			cltv_expiry,
 			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
 			source,
+			skimmed_fee_msat,
 		});
 
 		let res = msgs::UpdateAddHTLC {
@@ -5140,6 +5169,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			payment_hash,
 			cltv_expiry,
 			onion_routing_packet,
+			skimmed_fee_msat,
 		};
 		self.context.next_holder_htlc_id += 1;
 
@@ -5278,8 +5308,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	///
 	/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
 	/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
-	pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
-		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+	pub fn send_htlc_and_commit<F: Deref, L: Deref>(
+		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
+		source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
+	) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
+	where F::Target: FeeEstimator, L::Target: Logger
+	{
+		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
+			onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
 		if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
 		match send_res? {
 			Some(_) => {
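
Aside: `skimmed_fee_msat` rides along with the HTLC from `send_htlc_and_commit` down into the holding cell, the pending-HTLC state and the wire message. This diff does not spell out its semantics; the intended reading (an assumption, based on the field's name and the "extra fee we're skimming off the top" comment near the top of the diff) is the gap between what the onion asked us to forward and what the outbound HTLC actually carries:

```rust
// Illustrative arithmetic only (assumed semantics, see note above).
fn skimmed_fee_msat(onion_amt_to_forward_msat: u64, outbound_amt_msat: u64) -> u64 {
	onion_amt_to_forward_msat.saturating_sub(outbound_amt_msat)
}
```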
@@ -5311,7 +5348,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// [`ChannelMonitorUpdate`] will be returned).
 	pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
 		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-	-> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
 	where SP::Target: SignerProvider {
 		for htlc in self.context.pending_outbound_htlcs.iter() {
 			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
@@ -5382,9 +5419,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				}],
 			};
 			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
-			if self.push_blockable_mon_update(monitor_update) {
-				self.context.pending_monitor_updates.last().map(|upd| &upd.update)
-			} else { None }
+			self.push_ret_blockable_mon_update(monitor_update)
 		} else { None };
 
 		let shutdown = msgs::Shutdown {
 			channel_id: self.context.channel_id,
@@ -5622,7 +5657,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
 				channel_type,
 				channel_keys_id,
 
-				pending_monitor_updates: Vec::new(),
+				blocked_monitor_updates: Vec::new(),
 			}
 		})
 	}
@@ -5707,12 +5742,9 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
 		// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
 		// set it now. If they don't understand it, we'll fall back to our default of
 		// `only_static_remotekey`.
-		#[cfg(anchors)]
-		{ // Attributes are not allowed on if expressions on our current MSRV of 1.41.
-			if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
-				their_features.supports_anchors_zero_fee_htlc_tx() {
-				ret.set_anchors_zero_fee_htlc_tx_required();
-			}
+		if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+			their_features.supports_anchors_zero_fee_htlc_tx() {
+			ret.set_anchors_zero_fee_htlc_tx_required();
 		}
 
 		ret
@@ -5744,6 +5776,7 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
 		} else {
 			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
 		}
+		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
 		Ok(self.get_open_channel(chain_hash))
 	}
@@ -5862,7 +5895,8 @@ impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
 			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
 				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
 			}
-			self.context.channel_type = channel_type;
+			self.context.channel_type = channel_type.clone();
+			self.context.channel_transaction_parameters.channel_type_features = channel_type;
 		}
 
 		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
@@ -6249,7 +6283,7 @@ impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
 				channel_type,
 				channel_keys_id,
 
-				pending_monitor_updates: Vec::new(),
+				blocked_monitor_updates: Vec::new(),
 			}
 		};
 
@@ -6600,9 +6634,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
 		}
 
 		let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
+		let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
 
 		(self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
-		for htlc in self.context.pending_outbound_htlcs.iter() {
+		for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
 			htlc.htlc_id.write(writer)?;
 			htlc.amount_msat.write(writer)?;
 			htlc.cltv_expiry.write(writer)?;
@@ -6638,18 +6673,37 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
 					reason.write(writer)?;
 				}
 			}
+			if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
+				if pending_outbound_skimmed_fees.is_empty() {
+					for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
+				}
+				pending_outbound_skimmed_fees.push(Some(skimmed_fee));
+			} else if !pending_outbound_skimmed_fees.is_empty() {
+				pending_outbound_skimmed_fees.push(None);
+			}
 		}
 
+		let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
 		(self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
-		for update in self.context.holding_cell_htlc_updates.iter() {
+		for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
 			match update {
-				&HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet } => {
+				&HTLCUpdateAwaitingACK::AddHTLC {
+					ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
+					skimmed_fee_msat,
+				} => {
 					0u8.write(writer)?;
 					amount_msat.write(writer)?;
 					cltv_expiry.write(writer)?;
 					payment_hash.write(writer)?;
 					source.write(writer)?;
 					onion_routing_packet.write(writer)?;
+
+					if let Some(skimmed_fee) = skimmed_fee_msat {
+						if holding_cell_skimmed_fees.is_empty() {
+							for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
+						}
+						holding_cell_skimmed_fees.push(Some(skimmed_fee));
+					} else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
 				},
 				&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
 					1u8.write(writer)?;
@@ -6802,10 +6856,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
 			(5, self.context.config, required),
 			(6, serialized_holder_htlc_max_in_flight, option),
 			(7, self.context.shutdown_scriptpubkey, option),
+			(8, self.context.blocked_monitor_updates, optional_vec),
 			(9, self.context.target_closing_feerate_sats_per_kw, option),
-			(11, self.context.monitor_pending_finalized_fulfills, vec_type),
+			(11, self.context.monitor_pending_finalized_fulfills, required_vec),
 			(13, self.context.channel_creation_height, required),
-			(15, preimages, vec_type),
+			(15, preimages, required_vec),
 			(17, self.context.announcement_sigs_state, required),
 			(19, self.context.latest_inbound_scid_alias, option),
 			(21, self.context.outbound_scid_alias, required),
@@ -6815,7 +6870,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for Channel<Signer> {
 			(28, holder_max_accepted_htlcs, option),
 			(29, self.context.temporary_channel_id, option),
 			(31, channel_pending_event_emitted, option),
-			(33, self.context.pending_monitor_updates, vec_type),
+			(35, pending_outbound_skimmed_fees, optional_vec),
+			(37, holding_cell_skimmed_fees, optional_vec),
 		});
 
 		Ok(())
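
Aside: the writer above only materializes a skimmed-fee vector once it sees the first `Some`, backfilling `None`s so that index i always lines up with HTLC i, and omits the TLV (types 35/37) entirely when every fee is `None`. The same scheme in isolation, with a check of the alignment property:

```rust
fn collect_skimmed_fees(fees: &[Option<u64>]) -> Option<Vec<Option<u64>>> {
	let mut out: Vec<Option<u64>> = Vec::new();
	for (idx, fee) in fees.iter().enumerate() {
		if fee.is_some() {
			if out.is_empty() {
				for _ in 0..idx { out.push(None); } // backfill earlier HTLCs
			}
			out.push(*fee);
		} else if !out.is_empty() {
			out.push(None);
		}
	}
	if out.is_empty() { None } else { Some(out) } // all-None: write no TLV
}

#[test]
fn index_alignment() {
	assert_eq!(collect_skimmed_fees(&[None, Some(5), None]), Some(vec![None, Some(5), None]));
	assert_eq!(collect_skimmed_fees(&[None, None]), None);
}
```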
@@ -6926,6 +6982,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 				},
 				_ => return Err(DecodeError::InvalidValue),
 			},
+			skimmed_fee_msat: None,
 		});
 	}
 
@@ -6939,6 +6996,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 				payment_hash: Readable::read(reader)?,
 				source: Readable::read(reader)?,
 				onion_routing_packet: Readable::read(reader)?,
+				skimmed_fee_msat: None,
 			},
 			1 => HTLCUpdateAwaitingACK::ClaimHTLC {
 				payment_preimage: Readable::read(reader)?,
@@ -7035,7 +7093,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			_ => return Err(DecodeError::InvalidValue),
 		};
 
-		let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
+		let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
 		let funding_transaction = Readable::read(reader)?;
 
 		let counterparty_cur_commitment_point = Readable::read(reader)?;
@@ -7092,7 +7150,10 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		let mut temporary_channel_id: Option<[u8; 32]> = None;
 		let mut holder_max_accepted_htlcs: Option<u16> = None;
 
-		let mut pending_monitor_updates = Some(Vec::new());
+		let mut blocked_monitor_updates = Some(Vec::new());
+
+		let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
+		let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
 
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
@@ -7103,10 +7164,11 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
 			(6, holder_max_htlc_value_in_flight_msat, option),
 			(7, shutdown_scriptpubkey, option),
+			(8, blocked_monitor_updates, optional_vec),
 			(9, target_closing_feerate_sats_per_kw, option),
-			(11, monitor_pending_finalized_fulfills, vec_type),
+			(11, monitor_pending_finalized_fulfills, optional_vec),
 			(13, channel_creation_height, option),
-			(15, preimages_opt, vec_type),
+			(15, preimages_opt, optional_vec),
 			(17, announcement_sigs_state, option),
 			(19, latest_inbound_scid_alias, option),
 			(21, outbound_scid_alias, option),
@@ -7116,7 +7178,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(28, holder_max_accepted_htlcs, option),
 			(29, temporary_channel_id, option),
 			(31, channel_pending_event_emitted, option),
-			(33, pending_monitor_updates, vec_type),
+			(35, pending_outbound_skimmed_fees_opt, optional_vec),
+			(37, holding_cell_skimmed_fees_opt, optional_vec),
 		});
 
 		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -7161,6 +7224,10 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			return Err(DecodeError::UnknownRequiredFeature);
 		}
 
+		// ChannelTransactionParameters may have had an empty features set upon deserialization.
+		// To account for that, we're proactively setting/overriding the field here.
+		channel_parameters.channel_type_features = chan_features.clone();
+
 		let mut secp_ctx = Secp256k1::new();
 		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
 
@@ -7171,6 +7238,25 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 
 		let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
 
+		if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
+			let mut iter = skimmed_fees.into_iter();
+			for htlc in pending_outbound_htlcs.iter_mut() {
+				htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+			}
+			// We expect all skimmed fees to be consumed above
+			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+		}
+		if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
+			let mut iter = skimmed_fees.into_iter();
+			for htlc in holding_cell_htlc_updates.iter_mut() {
+				if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
+					*skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
+				}
+			}
+			// We expect all skimmed fees to be consumed above
+			if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
+		}
+
 		Ok(Channel {
 			context: ChannelContext {
 				user_id,
@@ -7288,7 +7374,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 				channel_type: channel_type.unwrap(),
 				channel_keys_id,
 
-				pending_monitor_updates: pending_monitor_updates.unwrap(),
+				blocked_monitor_updates: blocked_monitor_updates.unwrap(),
 			}
 		})
 	}
@@ -7305,7 +7391,6 @@ mod tests {
 	use hex;
 	use crate::ln::PaymentHash;
 	use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
-	#[cfg(anchors)]
 	use crate::ln::channel::InitFeatures;
 	use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
 	use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
@@ -7395,7 +7480,7 @@ mod tests {
 		}
 	}
 
-	#[cfg(not(feature = "grind_signatures"))]
+	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
 	fn public_from_secret_hex(secp_ctx: &Secp256k1<secp256k1::All>, hex: &str) -> PublicKey {
 		PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
 	}
@@ -7513,7 +7598,8 @@ mod tests {
 				session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
 				first_hop_htlc_msat: 548,
 				payment_id: PaymentId([42; 32]),
-			}
+			},
+			skimmed_fee_msat: None,
 		});
 
 		// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
@@ -8070,6 +8156,7 @@ mod tests {
 				payment_hash: PaymentHash([0; 32]),
 				state: OutboundHTLCState::Committed,
 				source: HTLCSource::dummy(),
+				skimmed_fee_msat: None,
 			};
 			out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
 			out
@@ -8082,6 +8169,7 @@ mod tests {
 				payment_hash: PaymentHash([0; 32]),
 				state: OutboundHTLCState::Committed,
 				source: HTLCSource::dummy(),
+				skimmed_fee_msat: None,
 			};
 			out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
 			out
@@ -8492,6 +8580,7 @@ mod tests {
 				payment_hash: PaymentHash([0; 32]),
 				state: OutboundHTLCState::Committed,
 				source: HTLCSource::dummy(),
+				skimmed_fee_msat: None,
 			};
 			out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
 			out
@@ -8504,6 +8593,7 @@ mod tests {
 				payment_hash: PaymentHash([0; 32]),
 				state: OutboundHTLCState::Committed,
 				source: HTLCSource::dummy(),
+				skimmed_fee_msat: None,
 			};
 			out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
 			out
@@ -8624,7 +8714,6 @@ mod tests {
 		assert!(res.is_ok());
 	}
 
-	#[cfg(anchors)]
 	#[test]
 	fn test_supports_anchors_zero_htlc_tx_fee() {
 		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
@@ -8670,7 +8759,6 @@ mod tests {
 		assert_eq!(channel_b.context.channel_type, expected_channel_type);
 	}
 
-	#[cfg(anchors)]
 	#[test]
 	fn test_rejects_implicit_simple_anchors() {
 		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
@@ -8711,7 +8799,6 @@ mod tests {
 		assert!(channel_b.is_err());
 	}
 
-	#[cfg(anchors)]
 	#[test]
 	fn test_rejects_simple_anchors_channel_type() {
 		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,