X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=58d35a1a95a4ddfc05b29d7fcfe4db5bdf6e89a7;hb=c01745eec7414a340404398255dcd54580380348;hp=2d72082432443cbe35ab1b7003b8e80d708c4c78;hpb=e142e4a4e6c856f533ec350ae61c1bbe2a3f20e9;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 2d720824..58d35a1a 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -50,7 +50,6 @@ use crate::util::scid_utils::scid_from_parts;
 use crate::io;
 use crate::prelude::*;
 use core::{cmp,mem,fmt};
-use core::convert::TryInto;
 use core::ops::Deref;
 #[cfg(any(test, fuzzing, debug_assertions))]
 use crate::sync::Mutex;
@@ -104,10 +103,38 @@ enum InboundHTLCRemovalReason {
 	Fulfill(PaymentPreimage),
 }
 
+/// Represents the resolution status of an inbound HTLC.
+#[derive(Clone)]
+enum InboundHTLCResolution {
+	/// Resolved implies the action we must take with the inbound HTLC has already been determined,
+	/// i.e., we already know whether it must be failed back or forwarded.
+	//
+	// TODO: Once this variant is removed, we should also clean up
+	// [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable.
+	Resolved {
+		pending_htlc_status: PendingHTLCStatus,
+	},
+	/// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed
+	/// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both
+	/// nodes for the state update in which it was proposed.
+	Pending {
+		update_add_htlc: msgs::UpdateAddHTLC,
+	},
+}
+
+impl_writeable_tlv_based_enum!(InboundHTLCResolution,
+	(0, Resolved) => {
+		(0, pending_htlc_status, required),
+	},
+	(2, Pending) => {
+		(0, update_add_htlc, required),
+	};
+);
+
 enum InboundHTLCState {
 	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
 	/// update_add_htlc message for this HTLC.
-	RemoteAnnounced(PendingHTLCStatus),
+	RemoteAnnounced(InboundHTLCResolution),
 	/// Included in a received commitment_signed message (implying we've
 	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
 	/// state (see the example below). We have not yet included this HTLC in a
@@ -137,13 +164,13 @@ enum InboundHTLCState {
 	/// Implies AwaitingRemoteRevoke.
 	///
 	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
-	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
+	AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution),
 	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
 	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
 	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
 	/// channel (before it can then get forwarded and/or removed).
 	/// Implies AwaitingRemoteRevoke.
-	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
+	AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
 	Committed,
 	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
 	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
@@ -1044,6 +1071,7 @@ pub(super) struct MonitorRestoreUpdates {
 	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
 	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
 	pub finalized_claimed_htlcs: Vec<HTLCSource>,
+	pub pending_update_adds: Vec<msgs::UpdateAddHTLC>,
 	pub funding_broadcastable: Option<Transaction>,
 	pub channel_ready: Option<msgs::ChannelReady>,
 	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
@@ -1161,6 +1189,10 @@ impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
 	UnfundedOutboundV1(OutboundV1Channel<SP>),
 	UnfundedInboundV1(InboundV1Channel<SP>),
+	#[cfg(dual_funding)]
+	UnfundedOutboundV2(OutboundV2Channel<SP>),
+	#[cfg(dual_funding)]
+	UnfundedInboundV2(InboundV2Channel<SP>),
 	Funded(Channel<SP>),
 }
 
@@ -1173,6 +1205,10 @@ impl<'a, SP: Deref> ChannelPhase<SP> where
 			ChannelPhase::Funded(chan) => &chan.context,
 			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
 			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+			#[cfg(dual_funding)]
+			ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
+			#[cfg(dual_funding)]
+			ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
 		}
 	}
 
@@ -1181,6 +1217,10 @@ impl<'a, SP: Deref> ChannelPhase<SP> where
 			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
 			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
 			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+			#[cfg(dual_funding)]
+			ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
+			#[cfg(dual_funding)]
+			ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
 		}
 	}
 }
@@ -1279,6 +1319,7 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
 	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
 	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
+	monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
 
 	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
 	/// but our signer (initially) refused to give us a signature, we should retry at some point in
@@ -1743,6 +1784,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			monitor_pending_forwards: Vec::new(),
 			monitor_pending_failures: Vec::new(),
 			monitor_pending_finalized_fulfills: Vec::new(),
+			monitor_pending_update_adds: Vec::new(),
 
 			signer_pending_commitment_update: false,
 			signer_pending_funding: false,
@@ -1964,6 +2006,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			monitor_pending_forwards: Vec::new(),
 			monitor_pending_failures: Vec::new(),
 			monitor_pending_finalized_fulfills: Vec::new(),
+			monitor_pending_update_adds: Vec::new(),
 
 			signer_pending_commitment_update: false,
 			signer_pending_funding: false,
@@ -2687,7 +2730,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			feerate_per_kw = cmp::max(feerate_per_kw, feerate);
 		}
 		let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
-		cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
+		cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
 	}
 
 	/// Get forwarding information for the counterparty.
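The final hunk above fixes the feerate buffer used when projecting dust exposure (this appears to be `get_dust_buffer_feerate`, which later hunks in this diff call): the old code floored the projected feerate at an absolute 2530 sat/kW via `cmp::max(2530, ...)`, so the intended extra 2530 sat/kW of headroom collapsed to just the 25% bump once the current feerate exceeded roughly 2,000 sat/kW; the new code takes the current feerate plus 2530 sat/kW, or 25% above it, whichever is higher. A minimal standalone sketch of the corrected rule (hypothetical free function, not the in-tree method):

fn bumped_dust_buffer_feerate(feerate_per_kw: u32) -> u32 {
    // 25% above the current feerate; None on u32 overflow.
    let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
    // At least the current feerate plus 2530 sat/kW (roughly 10 sat/vB).
    core::cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::MAX))
}

fn main() {
    // Up to 10_120 sat/kW the additive buffer dominates...
    assert_eq!(bumped_dust_buffer_feerate(253), 2_783);
    // ...beyond that the 25% bump is larger.
    assert_eq!(bumped_dust_buffer_feerate(20_000), 25_000);
}

Under the old floor, a channel at 20,000 sat/kW was still buffered to 25,000 by the 25% arm, but a channel at, say, 4,000 sat/kW got only max(2530, 5,000) = 5,000 rather than the 6,530 the fixed code produces.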
@@ -4074,23 +4117,15 @@ impl<SP: Deref> Channel<SP> where
 		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
 
-		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
+		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
 	}
 
-	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
-		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
-		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
-	) -> Result<(), ChannelError>
-	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
-		FE::Target: FeeEstimator, L::Target: Logger,
-	{
+	pub fn update_add_htlc(
+		&mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
+	) -> Result<(), ChannelError> {
 		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
 			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
 		}
-		// We can't accept HTLCs sent after we've sent a shutdown.
-		if self.context.channel_state.is_local_shutdown_sent() {
-			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
-		}
 		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
 		if self.context.channel_state.is_remote_shutdown_sent() {
 			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
@@ -4109,7 +4144,6 @@ impl<SP: Deref> Channel<SP> where
 		}
 
 		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
-		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
 		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
 			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
 		}
@@ -4138,34 +4172,6 @@ impl<SP: Deref> Channel<SP> where
 			}
 		}
 
-		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
-		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-			(0, 0)
-		} else {
-			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
-			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
-				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
-		};
-		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
-		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
-			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
-			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		}
-
-		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
-		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
-			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
-			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
-				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
-					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		}
-
 		let pending_value_to_self_msat =
 			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
 		let pending_remote_value_msat =
@@ -4199,23 +4205,7 @@ impl<SP: Deref> Channel<SP> where
 		} else {
 			0
 		};
-		if !self.context.is_outbound() {
-			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
-			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
-			// side, only on the sender's. Note that with anchor outputs we are no longer as
-			// sensitive to fee spikes, so we need to account for them.
-			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
-			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
-			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
-				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
-			}
-			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
-				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
-				// the HTLC, i.e. its status is already set to failing.
-				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
-				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
-			}
-		} else {
+		if self.context.is_outbound() {
 			// Check that they won't violate our local required channel reserve by adding this HTLC.
 			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
 			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
@@ -4243,7 +4233,9 @@ impl<SP: Deref> Channel<SP> where
 			amount_msat: msg.amount_msat,
 			payment_hash: msg.payment_hash,
 			cltv_expiry: msg.cltv_expiry,
-			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
+			state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
+				pending_htlc_status: pending_forward_status
+			}),
 		});
 		Ok(())
 	}
@@ -4449,13 +4441,13 @@ impl<SP: Deref> Channel<SP> where
 		}
 
 		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
-			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
-				Some(forward_info.clone())
+			let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state {
+				Some(resolution.clone())
 			} else { None };
-			if let Some(forward_info) = new_forward {
+			if let Some(htlc_resolution) = htlc_resolution {
 				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
 					&htlc.payment_hash, &self.context.channel_id);
-				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
+				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution);
 				need_commitment = true;
 			}
 		}
@@ -4765,6 +4757,7 @@ impl<SP: Deref> Channel<SP> where
 		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
 		let mut to_forward_infos = Vec::new();
+		let mut pending_update_adds = Vec::new();
 		let mut revoked_htlcs = Vec::new();
 		let mut finalized_claimed_htlcs = Vec::new();
 		let mut update_fail_htlcs = Vec::new();
@@ -4812,29 +4805,37 @@ impl<SP: Deref> Channel<SP> where
 					let mut state = InboundHTLCState::Committed;
 					mem::swap(&mut state, &mut htlc.state);
 
-					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
 						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
-						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution);
 						require_commitment = true;
-					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
-						match forward_info {
-							PendingHTLCStatus::Fail(fail_msg) => {
-								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
-								require_commitment = true;
-								match fail_msg {
-									HTLCFailureMsg::Relay(msg) => {
-										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
-										update_fail_htlcs.push(msg)
-									},
-									HTLCFailureMsg::Malformed(msg) => {
-										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
-										update_fail_malformed_htlcs.push(msg)
+					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state {
+						match resolution {
+							InboundHTLCResolution::Resolved { pending_htlc_status } =>
+								match pending_htlc_status {
+									PendingHTLCStatus::Fail(fail_msg) => {
+										log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
+										require_commitment = true;
+										match fail_msg {
+											HTLCFailureMsg::Relay(msg) => {
+												htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+												update_fail_htlcs.push(msg)
+											},
+											HTLCFailureMsg::Malformed(msg) => {
+												htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+												update_fail_malformed_htlcs.push(msg)
+											},
+										}
+									},
+									PendingHTLCStatus::Forward(forward_info) => {
+										log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash);
+										to_forward_infos.push((forward_info, htlc.htlc_id));
+										htlc.state = InboundHTLCState::Committed;
+									}
 								}
-							},
-							PendingHTLCStatus::Forward(forward_info) => {
+							InboundHTLCResolution::Pending { update_add_htlc } => {
 								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
-								to_forward_infos.push((forward_info, htlc.htlc_id));
+								pending_update_adds.push(update_add_htlc);
 								htlc.state = InboundHTLCState::Committed;
 							}
 						}
@@ -4895,6 +4896,8 @@ impl<SP: Deref> Channel<SP> where
 			}
 		}
 
+		self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
+
 		if self.context.channel_state.is_monitor_update_in_progress() {
 			// We can't actually generate a new commitment transaction (incl by freeing holding
 			// cells) while we can't update the monitor, so we just return what we have.
@@ -5195,13 +5198,16 @@ impl<SP: Deref> Channel<SP> where
 		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
 		let mut finalized_claimed_htlcs = Vec::new();
 		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
+		let mut pending_update_adds = Vec::new();
+		mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds);
 
 		if self.context.channel_state.is_peer_disconnected() {
 			self.context.monitor_pending_revoke_and_ack = false;
 			self.context.monitor_pending_commitment_signed = false;
 			return MonitorRestoreUpdates {
 				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
-				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
+				funding_broadcastable, channel_ready, announcement_sigs
 			};
 		}
 
@@ -5223,7 +5229,8 @@ impl<SP: Deref> Channel<SP> where
 			if commitment_update.is_some() { "a" } else { "no" },
 			if raa.is_some() { "an" } else { "no" },
 			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
 		MonitorRestoreUpdates {
-			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
+			pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs
 		}
 	}
 
@@ -5463,7 +5470,7 @@ impl<SP: Deref> Channel<SP> where
 
 		let shutdown_msg = self.get_outbound_shutdown();
 
-		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
+		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger);
 
 		if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
 			// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
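The hunks above thread the new `InboundHTLCResolution` through the commitment/revocation dance: a `Pending` resolution rides along unchanged while the HTLC moves toward being irrevocably committed, and only on the final `revoke_and_ack` is its raw `update_add_htlc` pushed into `pending_update_adds`, which is then staged in `monitor_pending_update_adds` and surfaced via `MonitorRestoreUpdates`. A hedged, self-contained sketch of that promotion path (stand-in types and simplified transitions, not the real `channel.rs` definitions):

// Stand-ins for PendingHTLCStatus and msgs::UpdateAddHTLC.
#[derive(Clone, Debug)]
enum Resolution {
    Resolved { forward: bool },
    Pending { update_add_htlc: String },
}

#[derive(Debug)]
enum InboundState {
    RemoteAnnounced(Resolution),
    AwaitingRemoteRevokeToAnnounce(Resolution),
    AwaitingAnnouncedRemoteRevoke(Resolution),
    Committed,
}

fn main() {
    let mut state = InboundState::RemoteAnnounced(Resolution::Pending {
        update_add_htlc: "raw update_add_htlc".to_owned(),
    });
    // Queue drained later by the monitor-update machinery.
    let mut pending_update_adds: Vec<String> = Vec::new();

    // commitment_signed received: the HTLC is now in our local commitment.
    state = match state {
        InboundState::RemoteAnnounced(res) => InboundState::AwaitingRemoteRevokeToAnnounce(res),
        s => s,
    };
    // Counterparty revoke_and_ack: one more commitment round remains.
    state = match state {
        InboundState::AwaitingRemoteRevokeToAnnounce(res) =>
            InboundState::AwaitingAnnouncedRemoteRevoke(res),
        s => s,
    };
    // Final revoke_and_ack: irrevocably committed. A Pending resolution is
    // queued for deferred processing instead of being forwarded or failed now.
    state = match state {
        InboundState::AwaitingAnnouncedRemoteRevoke(Resolution::Pending { update_add_htlc }) => {
            pending_update_adds.push(update_add_htlc);
            InboundState::Committed
        },
        s => s,
    };
    assert!(matches!(state, InboundState::Committed));
    assert_eq!(pending_update_adds.len(), 1);
}

Deferring the decision this way lets the forward/fail choice be made after the HTLC is irrevocably committed, rather than being locked in when `update_add_htlc` is first received.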
@@ -6087,6 +6094,86 @@ impl<SP: Deref> Channel<SP> where
 		})
 	}
 
+	pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
+		&self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
+	) -> Result<(), (&'static str, u16)>
+	where
+		F::Target: FeeEstimator,
+		L::Target: Logger
+	{
+		if self.context.channel_state.is_local_shutdown_sent() {
+			return Err(("Shutdown was already sent", 0x4000|8))
+		}
+
+		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
+		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			(0, 0)
+		} else {
+			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
+				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
+		};
+		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+				return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
+			}
+		}
+
+		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
+				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
+				return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
+			}
+		}
+
+		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
+		} else {
+			0
+		};
+
+		let mut removed_outbound_total_msat = 0;
+		for ref htlc in self.context.pending_outbound_htlcs.iter() {
+			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+				removed_outbound_total_msat += htlc.amount_msat;
+			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+				removed_outbound_total_msat += htlc.amount_msat;
+			}
+		}
+
+		let pending_value_to_self_msat =
+			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+		let pending_remote_value_msat =
+			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+
+		if !self.context.is_outbound() {
+			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
+			// side, only on the sender's. Note that with anchor outputs we are no longer as
+			// sensitive to fee spikes, so we need to account for them.
+			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
+			}
+			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
+				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
+				return Err(("Fee spike buffer violation", 0x1000|7));
+			}
+		}
+
+		Ok(())
+	}
+
 	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
 		self.context.cur_holder_commitment_transaction_number + 1
 	}
@@ -8220,7 +8307,7 @@ fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures)
 	ret
 }
 
-const SERIALIZATION_VERSION: u8 = 3;
+const SERIALIZATION_VERSION: u8 = 4;
 const MIN_SERIALIZATION_VERSION: u8 = 3;
 
 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
@@ -8282,7 +8369,18 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 		// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
 		// called.
 
-		write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
+		let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
+			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
+				InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
+				matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
+			},
+			_ => false,
+		}) {
+			SERIALIZATION_VERSION
+		} else {
+			MIN_SERIALIZATION_VERSION
+		};
+		write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
 
 		// `user_id` used to be a single u64 value. In order to remain backwards compatible with
 		// versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
@@ -8338,13 +8436,29 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 			htlc.payment_hash.write(writer)?;
 			match &htlc.state {
 				&InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
-				&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
+				&InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
 					1u8.write(writer)?;
-					htlc_state.write(writer)?;
+					if version_to_write <= 3 {
+						if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
+							pending_htlc_status.write(writer)?;
+						} else {
+							panic!();
+						}
+					} else {
+						htlc_resolution.write(writer)?;
+					}
 				},
-				&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
+				&InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
 					2u8.write(writer)?;
-					htlc_state.write(writer)?;
+					if version_to_write <= 3 {
+						if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
+							pending_htlc_status.write(writer)?;
+						} else {
+							panic!();
+						}
+					} else {
+						htlc_resolution.write(writer)?;
+					}
 				},
 				&InboundHTLCState::Committed => {
 					3u8.write(writer)?;
@@ -8570,6 +8684,11 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 		let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS
 			{ None } else { Some(self.context.holder_max_accepted_htlcs) };
 
+		let mut monitor_pending_update_adds = None;
+		if !self.context.monitor_pending_update_adds.is_empty() {
+			monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
+		}
+
 		write_tlv_fields!(writer, {
 			(0, self.context.announcement_sigs, option),
 			// minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
@@ -8587,6 +8706,7 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 			(7, self.context.shutdown_scriptpubkey, option),
 			(8, self.context.blocked_monitor_updates, optional_vec),
 			(9, self.context.target_closing_feerate_sats_per_kw, option),
+			(10, monitor_pending_update_adds, option), // Added in 0.0.122
 			(11, self.context.monitor_pending_finalized_fulfills, required_vec),
 			(13, self.context.channel_creation_height, required),
 			(15, preimages, required_vec),
@@ -8605,7 +8725,8 @@ impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
 			(39, pending_outbound_blinding_points, optional_vec),
 			(41, holding_cell_blinding_points, optional_vec),
 			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
-			(45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
+			// 45 and 47 are reserved for async signing
+			(49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
 		});
 
 		Ok(())
@@ -8681,8 +8802,22 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 				cltv_expiry: Readable::read(reader)?,
 				payment_hash: Readable::read(reader)?,
 				state: match <u8 as Readable>::read(reader)? {
-					1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
-					2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
+					1 => {
+						let resolution = if ver <= 3 {
+							InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
+						} else {
+							Readable::read(reader)?
+						};
+						InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
+					},
+					2 => {
+						let resolution = if ver <= 3 {
+							InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
+						} else {
+							Readable::read(reader)?
+						};
+						InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
+					},
 					3 => InboundHTLCState::Committed,
 					4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
 					_ => return Err(DecodeError::InvalidValue),
@@ -8899,6 +9034,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 		let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
 
 		let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
+		let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
 
 		read_tlv_fields!(reader, {
 			(0, announcement_sigs, option),
@@ -8911,6 +9047,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(7, shutdown_scriptpubkey, option),
 			(8, blocked_monitor_updates, optional_vec),
 			(9, target_closing_feerate_sats_per_kw, option),
+			(10, monitor_pending_update_adds, option), // Added in 0.0.122
 			(11, monitor_pending_finalized_fulfills, optional_vec),
 			(13, channel_creation_height, option),
 			(15, preimages_opt, optional_vec),
@@ -8929,7 +9066,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(39, pending_outbound_blinding_points_opt, optional_vec),
 			(41, holding_cell_blinding_points_opt, optional_vec),
 			(43, malformed_htlcs, optional_vec), // Added in 0.0.119
-			(45, local_initiated_shutdown, option),
+			// 45 and 47 are reserved for async signing
+			(49, local_initiated_shutdown, option),
 		});
 
 		let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
@@ -9082,6 +9220,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			monitor_pending_forwards,
 			monitor_pending_failures,
 			monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
+			monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
 
 			signer_pending_commitment_update: false,
 			signer_pending_funding: false,
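The serialization hunks amount to a conditional version bump: `SERIALIZATION_VERSION` becomes 4, but version 4 is only written when some pending inbound HTLC actually holds an `InboundHTLCResolution::Pending`; otherwise the writer sticks to version 3 so older readers stay compatible, and the reader decodes any `ver <= 3` HTLC-state payload as the legacy `Resolved` encoding. A hedged sketch of that rule using a hypothetical two-byte version prefix (assumed here to mirror `write_ver_prefix!`'s written-version/minimum-read-version pair; not LDK's actual serialization macros):

use std::io::{self, Read, Write};

const SERIALIZATION_VERSION: u8 = 4;
const MIN_SERIALIZATION_VERSION: u8 = 3;

// Write the version pair, bumping to 4 only when the new state is present.
fn write_ver(w: &mut impl Write, has_pending_resolution: bool) -> io::Result<()> {
    let ver = if has_pending_resolution { SERIALIZATION_VERSION } else { MIN_SERIALIZATION_VERSION };
    w.write_all(&[ver, MIN_SERIALIZATION_VERSION])
}

// Read it back; `true` means the stream uses the new Pending-capable encoding.
fn read_ver(r: &mut impl Read) -> io::Result<bool> {
    let mut prefix = [0u8; 2];
    r.read_exact(&mut prefix)?;
    let (ver, min_ver) = (prefix[0], prefix[1]);
    if min_ver > SERIALIZATION_VERSION {
        return Err(io::Error::new(io::ErrorKind::InvalidData, "stream requires a newer reader"));
    }
    // ver <= 3: decode HTLC states as the legacy Resolved-only encoding;
    // ver >= 4: decode full InboundHTLCResolution values.
    Ok(ver >= 4)
}

fn main() -> io::Result<()> {
    for has_pending in [false, true] {
        let mut buf = Vec::new();
        write_ver(&mut buf, has_pending)?;
        assert_eq!(read_ver(&mut buf.as_slice())?, has_pending);
    }
    Ok(())
}

This matches the writer-side `panic!()` branches above: when writing at version 3, every inbound HTLC state must still be `Resolved`, since a `Pending` resolution would have forced the version bump.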