From 1ee0a66d21cfd3696ed93938ba08281a0c342741 Mon Sep 17 00:00:00 2001
From: Duncan Dean
Date: Wed, 7 Jun 2023 11:57:35 +0200
Subject: [PATCH] Move `Channel::get_update_time_counter` and some other
 methods

This is one of a series of commits to make sure methods are moved by
chunks so they are easily reviewable in diffs. Unfortunately they are
not purely move-only as fields need to be updated for things to
compile, but these should be quite clear.

This commit also uses these methods through the `context` field where
needed for compilation and tests to pass due to the above change.
---
 lightning/src/ln/channel.rs        | 190 ++++++++++++++---------------
 lightning/src/ln/channelmanager.rs |  78 ++++++------
 2 files changed, 134 insertions(+), 134 deletions(-)

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 0f034a8b..b1cd603f 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -788,6 +788,48 @@ impl<Signer: ChannelSigner> ChannelContext<Signer> {
 	pub(crate) fn opt_anchors(&self) -> bool {
 		self.channel_transaction_parameters.opt_anchors.is_some()
 	}
+
+	/// Allowed in any state (including after shutdown)
+	pub fn get_update_time_counter(&self) -> u32 {
+		self.update_time_counter
+	}
+
+	pub fn get_latest_monitor_update_id(&self) -> u64 {
+		self.latest_monitor_update_id
+	}
+
+	pub fn should_announce(&self) -> bool {
+		self.config.announced_channel
+	}
+
+	pub fn is_outbound(&self) -> bool {
+		self.channel_transaction_parameters.is_outbound_from_holder
+	}
+
+	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
+	/// Allowed in any state (including after shutdown)
+	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
+		self.config.options.forwarding_fee_base_msat
+	}
+
+	/// Returns true if we've ever received a message from the remote end for this Channel
+	pub fn have_received_message(&self) -> bool {
+		self.channel_state > (ChannelState::OurInitSent as u32)
+	}
+
+	/// Returns true if this channel is fully established and not known to be closing.
+	/// Allowed in any state (including after shutdown)
+	pub fn is_usable(&self) -> bool {
+		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
+		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
+	}
+
+	/// Returns true if this channel is currently available for use. This is a superset of
+	/// is_usable() and considers things like the channel being temporarily disabled.
+	/// Allowed in any state (including after shutdown)
+	pub fn is_live(&self) -> bool {
+		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
+	}
 }
 
 // Internal utility functions for channels
@@ -962,7 +1004,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// not of our ability to open any channel at all. Thus, on error, we should first call this
 	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
 	pub(crate) fn maybe_handle_error_without_close(&mut self, chain_hash: BlockHash) -> Result<msgs::OpenChannel, ()> {
-		if !self.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
+		if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
 		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
 			// We've exhausted our options
 			return Err(());
@@ -1582,9 +1624,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			if match update_state {
 				// Note that these match the inclusion criteria when scanning
 				// pending_inbound_htlcs below.
- FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local }, - FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local }, - FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local }, + FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); !generated_by_local }, + FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.context.is_outbound()); !generated_by_local }, + FeeUpdateState::Outbound => { assert!(self.context.is_outbound()); generated_by_local }, } { feerate_per_kw = feerate; } @@ -1592,7 +1634,7 @@ impl Channel { log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), - get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), + get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()), log_bytes!(self.context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw); macro_rules! get_htlc_in_commitment { @@ -1735,7 +1777,7 @@ impl Channel { let total_fee_sat = Channel::::commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), self.context.channel_transaction_parameters.opt_anchors.is_some()); let anchors_val = if self.context.channel_transaction_parameters.opt_anchors.is_some() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64; - let (value_to_self, value_to_remote) = if self.is_outbound() { + let (value_to_self, value_to_remote) = if self.context.is_outbound() { (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000) } else { (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64) @@ -1840,14 +1882,14 @@ impl Channel { assert!(self.context.pending_update_fee.is_none()); let mut total_fee_satoshis = proposed_total_fee_satoshis; - let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.is_outbound() { total_fee_satoshis as i64 } else { 0 }; - let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.is_outbound() { 0 } else { total_fee_satoshis as i64 }; + let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 }; + let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 }; if value_to_holder < 0 { - assert!(self.is_outbound()); + assert!(self.context.is_outbound()); total_fee_satoshis += (-value_to_holder) as u64; } else if value_to_counterparty < 0 { - assert!(!self.is_outbound()); + assert!(!self.context.is_outbound()); total_fee_satoshis += (-value_to_counterparty) as u64; } @@ -2216,7 +2258,7 @@ impl Channel { let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; // Check sanity of message fields: - if !self.is_outbound() { + if !self.context.is_outbound() { return Err(ChannelError::Close("Got an 
accept_channel message from an inbound peer".to_owned())); } if self.context.channel_state != ChannelState::OurInitSent as u32 { @@ -2384,7 +2426,7 @@ impl Channel { SP::Target: SignerProvider, L::Target: Logger { - if self.is_outbound() { + if self.context.is_outbound() { return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned())); } if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { @@ -2436,7 +2478,7 @@ impl Channel { let funding_redeemscript = self.get_funding_redeemscript(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); @@ -2477,7 +2519,7 @@ impl Channel { SP::Target: SignerProvider, L::Target: Logger { - if !self.is_outbound() { + if !self.context.is_outbound() { return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())); } if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 { @@ -2526,7 +2568,7 @@ impl Channel { let funding_redeemscript = self.get_funding_redeemscript(); let funding_txo = self.get_funding_txo().unwrap(); let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); @@ -2737,7 +2779,7 @@ impl Channel { let mut available_capacity_msat = outbound_capacity_msat; - if self.is_outbound() { + if self.context.is_outbound() { // We should mind channel commit tx fee when computing how much of the available capacity // can be used in the next htlc. Mirrors the logic in send_htlc. // @@ -2879,7 +2921,7 @@ impl Channel { /// /// Dust HTLCs are excluded. fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - assert!(self.is_outbound()); + assert!(self.context.is_outbound()); let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { (0, 0) @@ -2982,7 +3024,7 @@ impl Channel { /// /// Dust HTLCs are excluded. 
fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 { - assert!(!self.is_outbound()); + assert!(!self.context.is_outbound()); let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if self.context.opt_anchors() { (0, 0) @@ -3151,7 +3193,7 @@ impl Channel { // Check that the remote can afford to pay for this HTLC on-chain at the current // feerate_per_kw, while maintaining their channel reserve (as required by the spec). - let remote_commit_tx_fee_msat = if self.is_outbound() { 0 } else { + let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else { let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered); self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations }; @@ -3163,7 +3205,7 @@ impl Channel { return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned())); } - if !self.is_outbound() { + if !self.context.is_outbound() { // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from // the spec because in the spec, the fee spike buffer requirement doesn't exist on the // receiver's side, only on the sender's. @@ -3319,7 +3361,7 @@ impl Channel { update_state == FeeUpdateState::RemoteAnnounced } else { false }; if update_fee { - debug_assert!(!self.is_outbound()); + debug_assert!(!self.context.is_outbound()); let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())); @@ -3327,7 +3369,7 @@ impl Channel { } #[cfg(any(test, fuzzing))] { - if self.is_outbound() { + if self.context.is_outbound() { let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take(); *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None; if let Some(info) = projected_commit_tx_info { @@ -3787,14 +3829,14 @@ impl Channel { if let Some((feerate, update_state)) = self.context.pending_update_fee { match update_state { FeeUpdateState::Outbound => { - debug_assert!(self.is_outbound()); + debug_assert!(self.context.is_outbound()); log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate); self.context.feerate_per_kw = feerate; self.context.pending_update_fee = None; }, - FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); }, + FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); }, FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { - debug_assert!(!self.is_outbound()); + debug_assert!(!self.context.is_outbound()); log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); require_commitment = true; self.context.feerate_per_kw = feerate; @@ -3873,13 +3915,13 @@ impl Channel { /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this /// [`Channel`] if `force_holding_cell` is false. 
fn send_update_fee(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option where L::Target: Logger { - if !self.is_outbound() { + if !self.context.is_outbound() { panic!("Cannot send fee from inbound channel"); } - if !self.is_usable() { + if !self.context.is_usable() { panic!("Cannot update fee until channel is fully established and we haven't started shutting down"); } - if !self.is_live() { + if !self.context.is_live() { panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)"); } @@ -3984,7 +4026,7 @@ impl Channel { if let Some((_, update_state)) = self.context.pending_update_fee { if update_state == FeeUpdateState::RemoteAnnounced { - debug_assert!(!self.is_outbound()); + debug_assert!(!self.context.is_outbound()); self.context.pending_update_fee = None; } } @@ -4053,7 +4095,7 @@ impl Channel { // (re-)broadcast the funding transaction as we may have declined to broadcast it when we // first received the funding_signed. let mut funding_broadcastable = - if self.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { + if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 { self.context.funding_transaction.take() } else { None }; // That said, if the funding transaction is already confirmed (ie we're active with a @@ -4069,7 +4111,7 @@ impl Channel { // the funding transaction confirmed before the monitor was persisted, or // * a 0-conf channel and intended to send the channel_ready before any broadcast at all. let channel_ready = if self.context.monitor_pending_channel_ready { - assert!(!self.is_outbound() || self.context.minimum_depth == Some(0), + assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!"); self.context.monitor_pending_channel_ready = false; let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx); @@ -4121,7 +4163,7 @@ impl Channel { pub fn update_fee(&mut self, fee_estimator: &LowerBoundedFeeEstimator, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger { - if self.is_outbound() { + if self.context.is_outbound() { return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned())); } if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 { @@ -4212,7 +4254,7 @@ impl Channel { } } - let update_fee = if self.is_outbound() && self.context.pending_update_fee.is_some() { + let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() { Some(msgs::UpdateFee { channel_id: self.channel_id(), feerate_per_kw: self.context.pending_update_fee.unwrap().0, @@ -4423,7 +4465,7 @@ impl Channel { // If we fail to come to consensus, we'll have to force-close. 
let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background); let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() }; + let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() }; // The spec requires that (when the channel does not have anchors) we only send absolute // channel fees no greater than the absolute channel fee on the current commitment @@ -4432,7 +4474,7 @@ impl Channel { // some force-closure by old nodes, but we wanted to close the channel anyway. if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw { - let min_feerate = if self.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) }; + let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) }; proposed_feerate = cmp::max(proposed_feerate, min_feerate); proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate); } @@ -4446,7 +4488,7 @@ impl Channel { // if the funders' output is dust we have to know the absolute fee we're going to use. let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap())); let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000; - let proposed_max_total_fee_satoshis = if self.is_outbound() { + let proposed_max_total_fee_satoshis = if self.context.is_outbound() { // We always add force_close_avoidance_max_fee_satoshis to our normal // feerate-calculated fee, but allow the max to be overridden if we're using a // target feerate-calculated fee. @@ -4496,7 +4538,7 @@ impl Channel { return Ok((None, None)); } - if !self.is_outbound() { + if !self.context.is_outbound() { if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() { return self.closing_signed(fee_estimator, &msg); } @@ -4689,7 +4731,7 @@ impl Channel { return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned())); } - if self.is_outbound() && self.context.last_sent_closing_fee.is_none() { + if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() { return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned())); } @@ -4777,7 +4819,7 @@ impl Channel { return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee))); } - if !self.is_outbound() { + if !self.context.is_outbound() { // They have to pay, so pick the highest fee in the overlapping range. // We should never set an upper bound aside from their full balance debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000); @@ -4997,7 +5039,7 @@ impl Channel { // Checks whether we should emit a `ChannelReady` event. pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool { - self.is_usable() && !self.context.channel_ready_event_emitted + self.context.is_usable() && !self.context.channel_ready_event_emitted } // Remembers that we already emitted a `ChannelReady` event. 
@@ -5142,48 +5184,6 @@ impl Channel { } } - /// Allowed in any state (including after shutdown) - pub fn get_update_time_counter(&self) -> u32 { - self.context.update_time_counter - } - - pub fn get_latest_monitor_update_id(&self) -> u64 { - self.context.latest_monitor_update_id - } - - pub fn should_announce(&self) -> bool { - self.context.config.announced_channel - } - - pub fn is_outbound(&self) -> bool { - self.context.channel_transaction_parameters.is_outbound_from_holder - } - - /// Gets the fee we'd want to charge for adding an HTLC output to this Channel - /// Allowed in any state (including after shutdown) - pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 { - self.context.config.options.forwarding_fee_base_msat - } - - /// Returns true if we've ever received a message from the remote end for this Channel - pub fn have_received_message(&self) -> bool { - self.context.channel_state > (ChannelState::OurInitSent as u32) - } - - /// Returns true if this channel is fully established and not known to be closing. - /// Allowed in any state (including after shutdown) - pub fn is_usable(&self) -> bool { - let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK; - (self.context.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.context.monitor_pending_channel_ready - } - - /// Returns true if this channel is currently available for use. This is a superset of - /// is_usable() and considers things like the channel being temporarily disabled. - /// Allowed in any state (including after shutdown) - pub fn is_live(&self) -> bool { - self.is_usable() && (self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0) - } - /// Returns true if this channel has been marked as awaiting a monitor update to move forward. /// Allowed in any state (including after shutdown) pub fn is_awaiting_monitor_update(&self) -> bool { @@ -5191,7 +5191,7 @@ impl Channel { } pub fn get_latest_complete_monitor_update_id(&self) -> u64 { - if self.context.pending_monitor_updates.is_empty() { return self.get_latest_monitor_update_id(); } + if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); } self.context.pending_monitor_updates[0].update.update_id - 1 } @@ -5283,7 +5283,7 @@ impl Channel { // Because deciding we're awaiting initial broadcast spuriously could result in // funds-loss (as we don't have a monitor, but have the funding transaction confirmed), // we hard-assert here, even in production builds. - if self.is_outbound() { assert!(self.context.funding_transaction.is_some()); } + if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); } assert!(self.context.monitor_pending_channel_ready); assert_eq!(self.context.latest_monitor_update_id, 0); return true; @@ -5406,7 +5406,7 @@ impl Channel { let txo_idx = funding_txo.index as usize; if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() || tx.output[txo_idx].value != self.context.channel_value_satoshis { - if self.is_outbound() { + if self.context.is_outbound() { // If we generated the funding transaction and it doesn't match what it // should, the client is really broken and we should just panic and // tell them off. 
That said, because hash collisions happen with high @@ -5419,7 +5419,7 @@ impl Channel { let err_reason = "funding tx had wrong script/value or output index"; return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() }); } else { - if self.is_outbound() { + if self.context.is_outbound() { for input in tx.input.iter() { if input.witness.is_empty() { // We generated a malleable funding transaction, implying we've @@ -5539,7 +5539,7 @@ impl Channel { self.context.minimum_depth.unwrap(), funding_tx_confirmations); return Err(ClosureReason::ProcessingError { err: err_reason }); } - } else if !self.is_outbound() && self.context.funding_tx_confirmed_in.is_none() && + } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() && height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS { log_info!(logger, "Closing channel {} due to funding timeout", log_bytes!(self.context.channel_id)); // If funding_tx_confirmed_in is unset, the channel must not be active @@ -5585,7 +5585,7 @@ impl Channel { // something in the handler for the message that prompted this message): pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel { - if !self.is_outbound() { + if !self.context.is_outbound() { panic!("Tried to open a channel for an inbound channel?"); } if self.context.channel_state != ChannelState::OurInitSent as u32 { @@ -5641,7 +5641,7 @@ impl Channel { /// /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel { - if self.is_outbound() { + if self.context.is_outbound() { panic!("Tried to send accept_channel for an outbound channel?"); } if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) { @@ -5719,7 +5719,7 @@ impl Channel { /// Do NOT broadcast the funding transaction until after a successful funding_signed call! /// If an Err is returned, it is a ChannelError::Close. 
pub fn get_outbound_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result where L::Target: Logger { - if !self.is_outbound() { + if !self.context.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); } if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) { @@ -5778,7 +5778,7 @@ impl Channel { if !self.context.config.announced_channel { return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned())); } - if !self.is_usable() { + if !self.context.is_usable() { return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned())); } @@ -5813,7 +5813,7 @@ impl Channel { return None; } - if !self.is_usable() { + if !self.context.is_usable() { return None; } @@ -6126,7 +6126,7 @@ impl Channel { } if let Some((feerate, update_state)) = self.context.pending_update_fee { if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce { - debug_assert!(!self.is_outbound()); + debug_assert!(!self.context.is_outbound()); log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); self.context.feerate_per_kw = feerate; self.context.pending_update_fee = None; @@ -6163,7 +6163,7 @@ impl Channel { #[cfg(any(test, fuzzing))] { - if !self.is_outbound() { + if !self.context.is_outbound() { let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take(); *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None; if let Some(info) = projected_commit_tx_info { @@ -6652,7 +6652,7 @@ impl Writeable for Channel { fail_reason.write(writer)?; } - if self.is_outbound() { + if self.context.is_outbound() { self.context.pending_update_fee.map(|(a, _)| a).write(writer)?; } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee { Some(feerate).write(writer)?; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 8872ae6d..75b17cae 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1484,16 +1484,16 @@ impl ChannelDetails { // message (as they are always the first message from the counterparty). // Else `Channel::get_counterparty_htlc_minimum_msat` could return the // default `0` value set by `Channel::new_outbound`. - outbound_htlc_minimum_msat: if channel.have_received_message() { + outbound_htlc_minimum_msat: if channel.context.have_received_message() { Some(channel.get_counterparty_htlc_minimum_msat()) } else { None }, outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(), }, funding_txo: channel.get_funding_txo(), // Note that accept_channel (or open_channel) is always the first message, so // `have_received_message` indicates that type negotiation has completed. 
- channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, + channel_type: if channel.context.have_received_message() { Some(channel.get_channel_type().clone()) } else { None }, short_channel_id: channel.get_short_channel_id(), - outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, + outbound_scid_alias: if channel.context.is_usable() { Some(channel.outbound_scid_alias()) } else { None }, inbound_scid_alias: channel.latest_inbound_scid_alias(), channel_value_satoshis: channel.get_value_satoshis(), feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()), @@ -1507,10 +1507,10 @@ impl ChannelDetails { confirmations_required: channel.minimum_depth(), confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)), force_close_spend_delay: channel.get_counterparty_selected_contest_delay(), - is_outbound: channel.is_outbound(), - is_channel_ready: channel.is_usable(), - is_usable: channel.is_live(), - is_public: channel.should_announce(), + is_outbound: channel.context.is_outbound(), + is_channel_ready: channel.context.is_usable(), + is_usable: channel.context.is_live(), + is_public: channel.context.should_announce(), inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()), inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(), config: Some(channel.config()), @@ -1750,7 +1750,7 @@ macro_rules! handle_monitor_update_completion { &$self.node_signer, $self.genesis_hash, &$self.default_configuration, $self.best_block.read().unwrap().height()); let counterparty_node_id = $chan.get_counterparty_node_id(); - let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() { + let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() { // We only send a channel_update in the case where we are just now sending a // channel_ready and the channel is in a usable state. We may re-send a // channel_update later through the announcement_signatures process for public @@ -2116,7 +2116,7 @@ where // Note we use is_live here instead of usable which leads to somewhat confused // internal/external nomenclature, but that's ok cause that's probably what the user // really wanted anyway. - self.list_channels_with_filter(|&(_, ref channel)| channel.is_live()) + self.list_channels_with_filter(|&(_, ref channel)| channel.context.is_live()) } /// Gets the list of channels we have with a given counterparty, in random order. @@ -2650,7 +2650,7 @@ where }, Some(chan) => chan }; - if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { + if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if // we don't allow forwards outbound over them. @@ -2669,7 +2669,7 @@ where // around to doing the actual forward, but better to fail early if we can and // hopefully an attacker trying to path-trace payments cannot make this occur // on a small/per-node/per-channel scale. - if !chan.is_live() { // channel_disabled + if !chan.context.is_live() { // channel_disabled // If the channel_update we're going to return is disabled (i.e. the // peer has been disabled for some time), return `channel_disabled`, // otherwise return `temporary_channel_failure`. 
@@ -2765,7 +2765,7 @@ where /// [`channel_update`]: msgs::ChannelUpdate /// [`internal_closing_signed`]: Self::internal_closing_signed fn get_channel_update_for_broadcast(&self, chan: &Channel<::Signer>) -> Result { - if !chan.should_announce() { + if !chan.context.should_announce() { return Err(LightningError { err: "Cannot broadcast a channel_update for a private channel".to_owned(), action: msgs::ErrorAction::IgnoreError @@ -2802,7 +2802,7 @@ where log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id())); let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..]; - let enabled = chan.is_usable() && match chan.channel_update_status() { + let enabled = chan.context.is_usable() && match chan.channel_update_status() { ChannelUpdateStatus::Enabled => true, ChannelUpdateStatus::DisabledStaged(_) => true, ChannelUpdateStatus::Disabled => false, @@ -2812,12 +2812,12 @@ where let unsigned = msgs::UnsignedChannelUpdate { chain_hash: self.genesis_hash, short_channel_id, - timestamp: chan.get_update_time_counter(), + timestamp: chan.context.get_update_time_counter(), flags: (!were_node_one) as u8 | ((!enabled as u8) << 1), cltv_expiry_delta: chan.get_cltv_expiry_delta(), htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(), htlc_maximum_msat: chan.get_announced_htlc_max_msat(), - fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(), + fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(), fee_proportional_millionths: chan.get_fee_proportional_millionths(), excess_data: Vec::new(), }; @@ -2866,7 +2866,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) { - if !chan.get().is_live() { + if !chan.get().context.is_live() { return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } let funding_txo = chan.get().get_funding_txo().unwrap(); @@ -3360,7 +3360,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.get(next_hop_channel_id) { Some(chan) => { - if !chan.is_usable() { + if !chan.context.is_usable() { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id)) }) @@ -3943,14 +3943,14 @@ where } fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<::Signer>, new_feerate: u32) -> NotifyOption { - if !chan.is_outbound() { return NotifyOption::SkipPersist; } + if !chan.context.is_outbound() { return NotifyOption::SkipPersist; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; } - if !chan.is_live() { + if !chan.context.is_live() { log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate); return NotifyOption::SkipPersist; @@ -4030,13 +4030,13 @@ where } match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => 
chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)), - ChannelUpdateStatus::DisabledStaged(_) if chan.is_live() + ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)), + ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)), + ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged(_) if !chan.is_live() + ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - ChannelUpdateStatus::DisabledStaged(mut n) if !chan.is_live() => { + ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => { n += 1; if n >= DISABLE_GOSSIP_TICKS { chan.set_channel_update_status(ChannelUpdateStatus::Disabled); @@ -4050,7 +4050,7 @@ where chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n)); } }, - ChannelUpdateStatus::EnabledStaged(mut n) if chan.is_live() => { + ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => { n += 1; if n >= ENABLE_GOSSIP_TICKS { chan.set_channel_update_status(ChannelUpdateStatus::Enabled); @@ -4227,7 +4227,7 @@ where // guess somewhat. If its a public channel, we figure best to just use the real SCID (as // we're not leaking that we have a channel with the counterparty), otherwise we try to use // an inbound SCID alias before the real SCID. - let scid_pref = if chan.should_announce() { + let scid_pref = if chan.context.should_announce() { chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) } else { chan.latest_inbound_scid_alias().or(chan.get_short_channel_id()) @@ -4712,8 +4712,8 @@ where } }; log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}", - highest_applied_update_id, channel.get().get_latest_monitor_update_id()); - if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id { + highest_applied_update_id, channel.get().context.get_latest_monitor_update_id()); + if !channel.get().is_awaiting_monitor_update() || channel.get().context.get_latest_monitor_update_id() != highest_applied_update_id { return; } handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut()); @@ -4845,7 +4845,7 @@ where ) -> usize { let mut num_unfunded_channels = 0; for (_, chan) in peer.channel_by_id.iter() { - if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 && + if !chan.context.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 && chan.get_funding_tx_confirmations(best_block_height) == 0 { num_unfunded_channels += 1; @@ -5088,7 +5088,7 @@ where node_id: counterparty_node_id.clone(), msg: announcement_sigs, }); - } else if chan.get().is_usable() { + } else if chan.get().context.is_usable() { // If we're sending an announcement_signatures, we'll send the (public) // channel_update after sending a channel_announcement when we receive our // counterparty's announcement_signatures. 
Thus, we only bother to send a @@ -5525,7 +5525,7 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - if !chan.get().is_usable() { + if !chan.get().context.is_usable() { return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError})); } @@ -5563,7 +5563,7 @@ where match peer_state.channel_by_id.entry(chan_id) { hash_map::Entry::Occupied(mut chan) => { if chan.get().get_counterparty_node_id() != *counterparty_node_id { - if chan.get().should_announce() { + if chan.get().context.should_announce() { // If the announcement is about a channel of ours which is public, some // other peer may simply be forwarding all its gossip to us. Don't provide // a scary-looking error message and return Ok instead. @@ -5612,7 +5612,7 @@ where node_id: counterparty_node_id.clone(), msg, }); - } else if chan.get().is_usable() { + } else if chan.get().context.is_usable() { // If the channel is in a usable state (ie the channel is not being shut // down), send a unicast channel_update to our counterparty to make sure // they have the latest channel parameters. @@ -6479,7 +6479,7 @@ where } if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(self, pending_msg_events, channel, channel_ready); - if channel.is_usable() { + if channel.context.is_usable() { log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id())); if let Ok(msg) = self.get_channel_update_for_unicast(channel) { pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate { @@ -6911,7 +6911,7 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; peer_state.channel_by_id.retain(|_, chan| { let retain = if chan.get_counterparty_node_id() == *counterparty_node_id { - if !chan.have_received_message() { + if !chan.context.have_received_message() { // If we created this (outbound) channel while we were disconnected from the // peer we probably failed to send the open_channel message, which is now // lost. 
We can't have had anything pending related to this channel, so we just @@ -7957,12 +7957,12 @@ where } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() || channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() || - channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() { + channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() { // But if the channel is behind of the monitor, close the channel: log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!"); log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast."); log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.", - log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id()); + log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id()); let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true); if let Some((counterparty_node_id, funding_txo, update)) = monitor_update { pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { @@ -7996,7 +7996,7 @@ where } } else { log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}", - log_bytes!(channel.channel_id()), channel.get_latest_monitor_update_id(), + log_bytes!(channel.channel_id()), channel.context.get_latest_monitor_update_id(), monitor.get_latest_update_id()); channel.complete_all_mon_updates_through(monitor.get_latest_update_id()); if let Some(short_channel_id) = channel.get_short_channel_id() { @@ -8423,7 +8423,7 @@ where log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias()); return Err(DecodeError::InvalidValue); } - if chan.is_usable() { + if chan.context.is_usable() { if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() { // Note that in rare cases its possible to hit this while reading an older // channel if we just happened to pick a colliding outbound alias above. -- 2.30.2
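
Reviewer note: the sketch below illustrates the call-site pattern this patch adopts, using deliberately simplified, hypothetical types (`Ctx` and `Chan` are stand-ins, not LDK's real `ChannelContext` and `Channel`, and the field names are illustrative only). Read-only state helpers live on the context struct, and both the outer channel and external callers such as channelmanager.rs reach them through the `context` field.

// Minimal, self-contained Rust sketch of the refactor pattern; hypothetical
// stand-in types, not the actual LDK definitions.
struct Ctx {
	is_outbound_from_holder: bool,
	update_time_counter: u32,
}

impl Ctx {
	// Moved here from the outer channel wrapper: pure state inspection that
	// only needs `&self` on the context.
	fn is_outbound(&self) -> bool {
		self.is_outbound_from_holder
	}

	fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}
}

struct Chan {
	// Exposed so call sites (e.g. the channel manager) can query state
	// directly via `chan.context.*`.
	context: Ctx,
}

impl Chan {
	fn maybe_handle_error(&self) -> Result<(), ()> {
		// Internal call sites now go through `self.context` instead of a
		// method on the wrapper itself.
		if !self.context.is_outbound() {
			return Err(());
		}
		Ok(())
	}
}

fn main() {
	let chan = Chan { context: Ctx { is_outbound_from_holder: true, update_time_counter: 1 } };
	// External callers use the same `context` accessor path.
	assert!(chan.context.is_outbound());
	assert_eq!(chan.context.get_update_time_counter(), 1);
	assert!(chan.maybe_handle_error().is_ok());
}

Keeping these read-only checks on the shared context means any wrapper that owns a context can reuse them without duplicating forwarding methods.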