pub fn is_live(&self) -> bool {
self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
}
+
+ // Public utilities:
+
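+ /// Returns the ID used to identify this channel with our counterparty (and in generated
+ /// events).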
+ pub fn channel_id(&self) -> [u8; 32] {
+ self.channel_id
+ }
+
+ /// Returns the `temporary_channel_id` used during channel establishment.
+ ///
+ /// Will return `None` for channels created prior to LDK version 0.0.115.
+ pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
+ self.temporary_channel_id
+ }
+
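+ /// Returns the minimum confirmation depth required for the funding transaction, if set.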
+ pub fn minimum_depth(&self) -> Option<u32> {
+ self.minimum_depth
+ }
+
+ /// Gets the "user_id" value passed into the construction of this channel. It has no special
+ /// meaning and exists only to allow users to have a persistent identifier of a channel.
+ pub fn get_user_id(&self) -> u128 {
+ self.user_id
+ }
+
+ /// Gets the channel's type
+ pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
+ &self.channel_type
+ }
+
+ /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
+ /// is_usable() returns true).
+ /// Allowed in any state (including after shutdown)
+ pub fn get_short_channel_id(&self) -> Option<u64> {
+ self.short_channel_id
+ }
+
+ /// Allowed in any state (including after shutdown)
+ pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
+ self.latest_inbound_scid_alias
+ }
+
+ /// Allowed in any state (including after shutdown)
+ pub fn outbound_scid_alias(&self) -> u64 {
+ self.outbound_scid_alias
+ }
+
+ /// Only allowed immediately after deserialization if [`Self::outbound_scid_alias`] returns 0,
+ /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
+ pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
+ assert_eq!(self.outbound_scid_alias, 0);
+ self.outbound_scid_alias = outbound_scid_alias;
+ }
+
+ /// Returns the funding_txo we either got from our peer, or were given by
+ /// get_outbound_funding_created.
+ pub fn get_funding_txo(&self) -> Option<OutPoint> {
+ self.channel_transaction_parameters.funding_outpoint
+ }
+
+ /// Returns the block hash in which our funding transaction was confirmed.
+ pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
+ self.funding_tx_confirmed_in
+ }
+
+ /// Returns the current number of confirmations on the funding transaction.
+ pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
+ if self.funding_tx_confirmation_height == 0 {
+ // We either haven't seen any confirmation yet, or observed a reorg.
+ return 0;
+ }
+
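+ // `height` is the current chain height; a transaction confirmed at the current height has
+ // exactly one confirmation, hence the `+ 1`.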
+ height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
+ }
+
+ fn get_holder_selected_contest_delay(&self) -> u16 {
+ self.channel_transaction_parameters.holder_selected_contest_delay
+ }
+
+ fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
+ &self.channel_transaction_parameters.holder_pubkeys
+ }
+
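+ /// Returns the `to_self_delay` selected by our counterparty, or `None` if their channel
+ /// parameters have not yet been received.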
+ pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
+ self.channel_transaction_parameters.counterparty_parameters
+ .as_ref().map(|params| params.selected_contest_delay)
+ }
+
+ fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
+ &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
+ }
+
+ /// Allowed in any state (including after shutdown)
+ pub fn get_counterparty_node_id(&self) -> PublicKey {
+ self.counterparty_node_id
+ }
+
+ /// Allowed in any state (including after shutdown)
+ pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
+ self.holder_htlc_minimum_msat
+ }
+
+ /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
+ pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
+ self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
+ }
+
+ /// Allowed in any state (including after shutdown)
+ pub fn get_announced_htlc_max_msat(&self) -> u64 {
+ return cmp::min(
+ // Upper-bound by channel capacity. We announce slightly less than the full capacity to
+ // discourage attempts to use it entirely, in an effort to reduce routing failures: in many
+ // cases the channel might already have been used to route very small values (either by
+ // honest users or as a DoS).
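+ // E.g., for a 1_000_000 sat channel this bound is 900_000_000 msat; the announced value
+ // may be lower still if the counterparty's max-in-flight limit is smaller.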
+ self.channel_value_satoshis * 1000 * 9 / 10,
+
+ self.counterparty_max_htlc_value_in_flight_msat
+ );
+ }
+
+ /// Allowed in any state (including after shutdown)
+ pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
+ self.counterparty_htlc_minimum_msat
+ }
+
+ /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
+ pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
+ self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
+ }
+
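+ /// Computes the effective HTLC maximum in msat: the channel value less both channel
+ /// reserves, capped at the given party's max-in-flight limit. Returns `None` until the
+ /// counterparty's selected reserve is known.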
+ fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
+ self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
+ let holder_reserve = self.holder_selected_channel_reserve_satoshis;
+ cmp::min(
+ (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
+ party_max_htlc_value_in_flight_msat
+ )
+ })
+ }
+
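+ /// Returns the total value of the channel, in satoshis, as set at funding.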
+ pub fn get_value_satoshis(&self) -> u64 {
+ self.channel_value_satoshis
+ }
+
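+ /// Returns the configured proportional forwarding fee, in millionths of a satoshi charged
+ /// per satoshi forwarded over this channel.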
+ pub fn get_fee_proportional_millionths(&self) -> u32 {
+ self.config.options.forwarding_fee_proportional_millionths
+ }
+
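+ /// Returns the CLTV expiry delta applied to HTLCs forwarded over this channel, clamped to
+ /// at least [`MIN_CLTV_EXPIRY_DELTA`].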
+ pub fn get_cltv_expiry_delta(&self) -> u16 {
+ cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
+ }
+
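+ /// Returns the configured maximum total dust HTLC exposure, in msat, we are willing to
+ /// carry on this channel.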
+ pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
+ self.config.options.max_dust_htlc_exposure_msat
+ }
+
+ /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
+ pub fn prev_config(&self) -> Option<ChannelConfig> {
+ self.prev_config.map(|prev_config| prev_config.0)
+ }
+
+ // Checks whether we should emit a `ChannelPending` event.
+ pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
+ self.is_funding_initiated() && !self.channel_pending_event_emitted
+ }
+
+ // Returns whether we already emitted a `ChannelPending` event.
+ pub(crate) fn channel_pending_event_emitted(&self) -> bool {
+ self.channel_pending_event_emitted
+ }
+
+ // Remembers that we already emitted a `ChannelPending` event.
+ pub(crate) fn set_channel_pending_event_emitted(&mut self) {
+ self.channel_pending_event_emitted = true;
+ }
+
+ // Checks whether we should emit a `ChannelReady` event.
+ pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
+ self.is_usable() && !self.channel_ready_event_emitted
+ }
+
+ // Remembers that we already emitted a `ChannelReady` event.
+ pub(crate) fn set_channel_ready_event_emitted(&mut self) {
+ self.channel_ready_event_emitted = true;
+ }
+
+ /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
+ /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
+ /// no longer be considered when forwarding HTLCs.
+ pub fn maybe_expire_prev_config(&mut self) {
+ if self.prev_config.is_none() {
+ return;
+ }
+ let prev_config = self.prev_config.as_mut().unwrap();
+ prev_config.1 += 1;
+ if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
+ self.prev_config = None;
+ }
+ }
+
+ /// Returns the current [`ChannelConfig`] applied to the channel.
+ pub fn config(&self) -> ChannelConfig {
+ self.config.options
+ }
+
+ /// Updates the channel's config. A bool is returned indicating whether the config update
+ /// requires a new `ChannelUpdate` message to be broadcast.
+ pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
+ let did_channel_update =
+ self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
+ self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
+ self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
+ if did_channel_update {
+ self.prev_config = Some((self.config.options, 0));
+ // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
+ // policy change to propagate throughout the network.
+ self.update_time_counter += 1;
+ }
+ self.config.options = *config;
+ did_channel_update
+ }
+
+ /// Returns true if funding_created was sent/received.
+ pub fn is_funding_initiated(&self) -> bool {
+ self.channel_state >= ChannelState::FundingSent as u32
+ }
}
// Internal utility functions for channels
log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
- get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound()),
+ get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()),
log_bytes!(self.context.channel_id), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
macro_rules! get_htlc_in_commitment {
let mut value_to_a = if local { value_to_self } else { value_to_remote };
let mut value_to_b = if local { value_to_remote } else { value_to_self };
let (funding_pubkey_a, funding_pubkey_b) = if local {
- (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
+ (self.context.get_holder_pubkeys().funding_pubkey, self.context.get_counterparty_pubkeys().funding_pubkey)
} else {
- (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
+ (self.context.get_counterparty_pubkeys().funding_pubkey, self.context.get_holder_pubkeys().funding_pubkey)
};
if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
/// TODO Some magic rust shit to compile-time check this?
fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
let per_commitment_point = self.context.holder_signer.get_per_commitment_point(commitment_number, &self.context.secp_ctx);
- let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
- let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
- let counterparty_pubkeys = self.get_counterparty_pubkeys();
+ let delayed_payment_base = &self.context.get_holder_pubkeys().delayed_payment_basepoint;
+ let htlc_basepoint = &self.context.get_holder_pubkeys().htlc_basepoint;
+ let counterparty_pubkeys = self.context.get_counterparty_pubkeys();
TxCreationKeys::derive_new(&self.context.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
}
fn build_remote_transaction_keys(&self) -> TxCreationKeys {
//TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
//may see payments to it!
- let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
- let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
- let counterparty_pubkeys = self.get_counterparty_pubkeys();
+ let revocation_basepoint = &self.context.get_holder_pubkeys().revocation_basepoint;
+ let htlc_basepoint = &self.context.get_holder_pubkeys().htlc_basepoint;
+ let counterparty_pubkeys = self.context.get_counterparty_pubkeys();
TxCreationKeys::derive_new(&self.context.secp_ctx, &self.context.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
}
/// pays to get_funding_redeemscript().to_v0_p2wsh()).
/// Panics if called before accept_channel/new_from_req
pub fn get_funding_redeemscript(&self) -> Script {
- make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
+ make_funding_redeemscript(&self.context.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
}
/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
InboundHTLCState::LocalRemoved(ref reason) => {
if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
} else {
- log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.channel_id()));
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
}
return UpdateFulfillFetch::DuplicateClaim {};
},
&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
if htlc_id_arg == htlc_id {
- log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.channel_id()));
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
// TODO: We may actually be able to switch to a fulfill here, though its
// rare enough it may not be worth the complexity burden.
debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
_ => {}
}
}
- log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.channel_id()), self.context.channel_state);
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
});
monitor_update,
htlc_value_msat,
msg: Some(msgs::UpdateFulfillHTLC {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
htlc_id: htlc_id_arg,
payment_preimage: payment_preimage_arg,
}),
_ => {}
}
}
- log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
+ log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
htlc_id: htlc_id_arg,
err_packet,
return Ok(None);
}
- log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.channel_id()));
+ log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
{
let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
}
Ok(Some(msgs::UpdateFailHTLC {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
htlc_id: htlc_id_arg,
reason: err_packet
}))
log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()),
encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
- encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
+ encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
}
let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
- log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
}
fn counterparty_funding_pubkey(&self) -> &PublicKey {
- &self.get_counterparty_pubkeys().funding_pubkey
+ &self.context.get_counterparty_pubkeys().funding_pubkey
}
pub fn funding_created<SP: Deref, L: Deref>(
initial_commitment_tx,
msg.signature,
Vec::new(),
- &self.get_holder_pubkeys().funding_pubkey,
+ &self.context.get_holder_pubkeys().funding_pubkey,
self.counterparty_funding_pubkey()
);
let funding_redeemscript = self.get_funding_redeemscript();
let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
- shutdown_script, self.get_holder_selected_contest_delay(),
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
&self.context.destination_script, (funding_txo, funding_txo_script.clone()),
&self.context.channel_transaction_parameters,
funding_redeemscript.clone(), self.context.channel_value_satoshis,
self.context.cur_counterparty_commitment_transaction_number -= 1;
self.context.cur_holder_commitment_transaction_number -= 1;
- log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));
+ log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
let need_channel_ready = self.check_get_channel_ready(0).is_some();
self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
- log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
let holder_signer = self.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
let initial_commitment_tx = self.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
}
}
initial_commitment_tx,
msg.signature,
Vec::new(),
- &self.get_holder_pubkeys().funding_pubkey,
+ &self.context.get_holder_pubkeys().funding_pubkey,
self.counterparty_funding_pubkey()
);
let funding_redeemscript = self.get_funding_redeemscript();
- let funding_txo = self.get_funding_txo().unwrap();
+ let funding_txo = self.context.get_funding_txo().unwrap();
let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
- shutdown_script, self.get_holder_selected_contest_delay(),
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
&self.context.destination_script, (funding_txo, funding_txo_script),
&self.context.channel_transaction_parameters,
funding_redeemscript.clone(), self.context.channel_value_satoshis,
self.context.cur_holder_commitment_transaction_number -= 1;
self.context.cur_counterparty_commitment_transaction_number -= 1;
- log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));
+ log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
let need_channel_ready = self.check_get_channel_ready(0).is_some();
self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
- log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.channel_id()));
+ log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
}
self.context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000)
};
let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
+ if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > self.context.get_max_dust_htlc_exposure_msat() as i64 {
remaining_msat_below_dust_exposure_limit =
- Some(self.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
+ Some(self.context.get_max_dust_htlc_exposure_msat().saturating_sub(on_counterparty_dust_htlc_exposure_msat));
dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
}
let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
- if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.get_max_dust_htlc_exposure_msat() as i64 {
+ if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > self.context.get_max_dust_htlc_exposure_msat() as i64 {
remaining_msat_below_dust_exposure_limit = Some(cmp::min(
remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
- self.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
+ self.context.get_max_dust_htlc_exposure_msat().saturating_sub(on_holder_dust_htlc_exposure_msat)));
dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
}
let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
- if on_counterparty_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
+ if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- on_counterparty_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
+ on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
}
let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
- if on_holder_tx_dust_htlc_exposure_msat > self.get_max_dust_htlc_exposure_msat() {
+ if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
- on_holder_tx_dust_htlc_exposure_msat, self.get_max_dust_htlc_exposure_msat());
+ on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
}
if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
// Note that if the pending_forward_status is not updated here, then it's because we're already failing
// the HTLC, i.e. its status is already set to failing.
- log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.channel_id()));
+ log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
} else {
log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
log_bytes!(msg.signature.serialize_compact()[..]),
log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
- log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
+ log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
}
for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
if let Some(_) = htlc.transaction_output_index {
let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
- self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
+ self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
- encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
+ encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
}
commitment_stats.tx,
msg.signature,
msg.htlc_signatures.clone(),
- &self.get_holder_pubkeys().funding_pubkey,
+ &self.context.get_holder_pubkeys().funding_pubkey,
self.counterparty_funding_pubkey()
);
} else { false };
log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
- log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+ log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
- if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.channel_id()));
+ if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
match e {
ChannelError::Ignore(ref msg) => {
log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
- log_bytes!(payment_hash.0), msg, log_bytes!(self.channel_id()));
+ log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
// If we fail to send here, then this HTLC should
// be failed backwards. Failing to send here
// indicates that this HTLC may keep being put back
monitor_update.updates.append(&mut additional_update.updates);
log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
- log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+ log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
- log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.channel_id()));
+ log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
let mut to_forward_infos = Vec::new();
let mut revoked_htlcs = Vec::new();
let mut finalized_claimed_htlcs = Vec::new();
self.context.monitor_pending_forwards.append(&mut to_forward_infos);
self.context.monitor_pending_failures.append(&mut revoked_htlcs);
self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
- log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
}
monitor_update.updates.append(&mut additional_update.updates);
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
- log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
+ log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
} else {
- log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
}
// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+ if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
return None;
}
- if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+ if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
return None;
}
self.context.sent_message_awaiting_response = None;
self.context.channel_state |= ChannelState::PeerDisconnected as u32;
- log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.channel_id()));
+ log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
}
/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
self.context.monitor_pending_channel_ready = false;
let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
Some(msgs::ChannelReady {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
next_per_commitment_point,
short_channel_id_alias: Some(self.context.outbound_scid_alias),
})
self.context.monitor_pending_commitment_signed = false;
let order = self.context.resend_order.clone();
log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
- log_bytes!(self.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+ log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
MonitorRestoreUpdates {
let outbound_stats = self.get_outbound_pending_htlc_stats(None);
let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+ if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
msg.feerate_per_kw, holder_tx_dust_exposure)));
}
- if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() {
+ if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
msg.feerate_per_kw, counterparty_tx_dust_exposure)));
}
for htlc in self.context.pending_outbound_htlcs.iter() {
if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
update_add_htlcs.push(msgs::UpdateAddHTLC {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
htlc_id: htlc.htlc_id,
amount_msat: htlc.amount_msat,
payment_hash: htlc.payment_hash,
match reason {
&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
update_fail_htlcs.push(msgs::UpdateFailHTLC {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
htlc_id: htlc.htlc_id,
reason: err_packet.clone()
});
},
&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
htlc_id: htlc.htlc_id,
sha256_of_onion: sha256_of_onion.clone(),
failure_code: failure_code.clone(),
},
&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
htlc_id: htlc.htlc_id,
payment_preimage: payment_preimage.clone(),
});
let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
Some(msgs::UpdateFee {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
feerate_per_kw: self.context.pending_update_fee.unwrap().0,
})
} else { None };
log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
- log_bytes!(self.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
+ log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
msgs::CommitmentUpdate {
update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
return Ok(ReestablishResponses {
channel_ready: Some(msgs::ChannelReady {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
next_per_commitment_point,
short_channel_id_alias: Some(self.context.outbound_scid_alias),
}),
// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
Some(msgs::ChannelReady {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
next_per_commitment_point,
short_channel_id_alias: Some(self.context.outbound_scid_alias),
})
if msg.next_local_commitment_number == next_counterparty_commitment_number {
if required_revoke.is_some() {
- log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id()));
+ log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
} else {
- log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
+ log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
}
Ok(ReestablishResponses {
})
} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
if required_revoke.is_some() {
- log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
+ log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
} else {
- log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.channel_id()));
+ log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
}
if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
- let funding_key = self.get_holder_pubkeys().funding_pubkey.serialize();
+ let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
let counterparty_funding_key = self.counterparty_funding_pubkey().serialize();
let mut holder_sig = sig.serialize_der().to_vec();
holder_sig.push(EcdsaSighashType::All as u8);
}
let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
- match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
+ match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
Ok(_) => {},
Err(_e) => {
// The remote end may have decided to revoke their output due to inconsistent dust
}
}
- // Public utilities:
-
- pub fn channel_id(&self) -> [u8; 32] {
- self.context.channel_id
- }
-
- // Return the `temporary_channel_id` used during channel establishment.
- //
- // Will return `None` for channels created prior to LDK version 0.0.115.
- pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
- self.context.temporary_channel_id
- }
-
- pub fn minimum_depth(&self) -> Option<u32> {
- self.context.minimum_depth
- }
-
- /// Gets the "user_id" value passed into the construction of this channel. It has no special
- /// meaning and exists only to allow users to have a persistent identifier of a channel.
- pub fn get_user_id(&self) -> u128 {
- self.context.user_id
- }
-
- /// Gets the channel's type
- pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
- &self.context.channel_type
- }
-
- /// Guaranteed to be Some after both ChannelReady messages have been exchanged (and, thus,
- /// is_usable() returns true).
- /// Allowed in any state (including after shutdown)
- pub fn get_short_channel_id(&self) -> Option<u64> {
- self.context.short_channel_id
- }
-
- /// Allowed in any state (including after shutdown)
- pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
- self.context.latest_inbound_scid_alias
- }
-
- /// Allowed in any state (including after shutdown)
- pub fn outbound_scid_alias(&self) -> u64 {
- self.context.outbound_scid_alias
- }
- /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
- /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases.
- pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
- assert_eq!(self.context.outbound_scid_alias, 0);
- self.context.outbound_scid_alias = outbound_scid_alias;
- }
-
- /// Returns the funding_txo we either got from our peer, or were given by
- /// get_outbound_funding_created.
- pub fn get_funding_txo(&self) -> Option<OutPoint> {
- self.context.channel_transaction_parameters.funding_outpoint
- }
-
- /// Returns the block hash in which our funding transaction was confirmed.
- pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
- self.context.funding_tx_confirmed_in
- }
-
- /// Returns the current number of confirmations on the funding transaction.
- pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
- if self.context.funding_tx_confirmation_height == 0 {
- // We either haven't seen any confirmation yet, or observed a reorg.
- return 0;
- }
-
- height.checked_sub(self.context.funding_tx_confirmation_height).map_or(0, |c| c + 1)
- }
-
- fn get_holder_selected_contest_delay(&self) -> u16 {
- self.context.channel_transaction_parameters.holder_selected_contest_delay
- }
-
- fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
- &self.context.channel_transaction_parameters.holder_pubkeys
- }
-
- pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
- self.context.channel_transaction_parameters.counterparty_parameters
- .as_ref().map(|params| params.selected_contest_delay)
- }
-
- fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
- &self.context.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
- }
-
- /// Allowed in any state (including after shutdown)
- pub fn get_counterparty_node_id(&self) -> PublicKey {
- self.context.counterparty_node_id
- }
-
- /// Allowed in any state (including after shutdown)
- pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
- self.context.holder_htlc_minimum_msat
- }
-
- /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
- pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
- self.get_htlc_maximum_msat(self.context.holder_max_htlc_value_in_flight_msat)
- }
-
- /// Allowed in any state (including after shutdown)
- pub fn get_announced_htlc_max_msat(&self) -> u64 {
- return cmp::min(
- // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
- // to use full capacity. This is an effort to reduce routing failures, because in many cases
- // channel might have been used to route very small values (either by honest users or as DoS).
- self.context.channel_value_satoshis * 1000 * 9 / 10,
-
- self.context.counterparty_max_htlc_value_in_flight_msat
- );
- }
-
- /// Allowed in any state (including after shutdown)
- pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
- self.context.counterparty_htlc_minimum_msat
- }
-
- /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
- pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
- self.get_htlc_maximum_msat(self.context.counterparty_max_htlc_value_in_flight_msat)
- }
-
- fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
- self.context.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
- let holder_reserve = self.context.holder_selected_channel_reserve_satoshis;
- cmp::min(
- (self.context.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
- party_max_htlc_value_in_flight_msat
- )
- })
- }
-
- pub fn get_value_satoshis(&self) -> u64 {
- self.context.channel_value_satoshis
- }
-
- pub fn get_fee_proportional_millionths(&self) -> u32 {
- self.context.config.options.forwarding_fee_proportional_millionths
- }
-
- pub fn get_cltv_expiry_delta(&self) -> u16 {
- cmp::max(self.context.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
- }
-
- pub fn get_max_dust_htlc_exposure_msat(&self) -> u64 {
- self.context.config.options.max_dust_htlc_exposure_msat
- }
-
- /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
- pub fn prev_config(&self) -> Option<ChannelConfig> {
- self.context.prev_config.map(|prev_config| prev_config.0)
- }
-
- // Checks whether we should emit a `ChannelPending` event.
- pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
- self.is_funding_initiated() && !self.context.channel_pending_event_emitted
- }
-
- // Returns whether we already emitted a `ChannelPending` event.
- pub(crate) fn channel_pending_event_emitted(&self) -> bool {
- self.context.channel_pending_event_emitted
- }
-
- // Remembers that we already emitted a `ChannelPending` event.
- pub(crate) fn set_channel_pending_event_emitted(&mut self) {
- self.context.channel_pending_event_emitted = true;
- }
-
- // Checks whether we should emit a `ChannelReady` event.
- pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
- self.context.is_usable() && !self.context.channel_ready_event_emitted
- }
-
- // Remembers that we already emitted a `ChannelReady` event.
- pub(crate) fn set_channel_ready_event_emitted(&mut self) {
- self.context.channel_ready_event_emitted = true;
- }
-
- /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
- /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
- /// no longer be considered when forwarding HTLCs.
- pub fn maybe_expire_prev_config(&mut self) {
- if self.context.prev_config.is_none() {
- return;
- }
- let prev_config = self.context.prev_config.as_mut().unwrap();
- prev_config.1 += 1;
- if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
- self.context.prev_config = None;
- }
- }
-
- /// Returns the current [`ChannelConfig`] applied to the channel.
- pub fn config(&self) -> ChannelConfig {
- self.context.config.options
- }
-
- /// Updates the channel's config. A bool is returned indicating whether the config update
- /// applied resulted in a new ChannelUpdate message.
- pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
- let did_channel_update =
- self.context.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
- self.context.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
- self.context.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
- if did_channel_update {
- self.context.prev_config = Some((self.context.config.options, 0));
- // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
- // policy change to propagate throughout the network.
- self.context.update_time_counter += 1;
- }
- self.context.config.options = *config;
- did_channel_update
- }
-
fn internal_htlc_satisfies_config(
&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
) -> Result<(), (&'static str, u16)> {
pub fn htlc_satisfies_config(
&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
) -> Result<(), (&'static str, u16)> {
- self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.config())
+ self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
.or_else(|err| {
- if let Some(prev_config) = self.prev_config() {
+ if let Some(prev_config) = self.context.prev_config() {
self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
} else {
Err(err)
.filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
}
- /// Returns true if funding_created was sent/received.
- pub fn is_funding_initiated(&self) -> bool {
- self.context.channel_state >= ChannelState::FundingSent as u32
- }
-
/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
/// If the channel is outbound, this implies we have not yet broadcasted the funding
/// transaction. If the channel is inbound, this implies simply that the channel has not
NS::Target: NodeSigner,
L::Target: Logger
{
- if let Some(funding_txo) = self.get_funding_txo() {
+ if let Some(funding_txo) = self.context.get_funding_txo() {
for &(index_in_block, tx) in txdata.iter() {
// Check if the transaction is the expected funding transaction, and if it is,
// check that it pays the right amount to the right script.
}
for inp in tx.input.iter() {
if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
- log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
+ log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id()));
return Err(ClosureReason::CommitmentTxConfirmed);
}
}
}
let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let keys = self.get_holder_pubkeys();
+ let keys = self.context.get_holder_pubkeys();
msgs::OpenChannel {
chain_hash,
channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
feerate_per_kw: self.context.feerate_per_kw as u32,
- to_self_delay: self.get_holder_selected_contest_delay(),
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
funding_pubkey: keys.funding_pubkey,
revocation_basepoint: keys.revocation_basepoint,
/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let keys = self.get_holder_pubkeys();
+ let keys = self.context.get_holder_pubkeys();
msgs::AcceptChannel {
temporary_channel_id: self.context.channel_id,
channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
minimum_depth: self.context.minimum_depth.unwrap(),
- to_self_delay: self.get_holder_selected_contest_delay(),
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
funding_pubkey: keys.funding_pubkey,
revocation_basepoint: keys.revocation_basepoint,
let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
- let counterparty_node_id = NodeId::from_pubkey(&self.get_counterparty_node_id());
+ let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
let msg = msgs::UnsignedChannelAnnouncement {
features: channelmanager::provided_channel_features(&user_config),
chain_hash,
- short_channel_id: self.get_short_channel_id().unwrap(),
+ short_channel_id: self.context.get_short_channel_id().unwrap(),
node_id_1: if were_node_one { node_id } else { counterparty_node_id },
node_id_2: if were_node_one { counterparty_node_id } else { node_id },
- bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey() }),
- bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.counterparty_funding_pubkey() } else { &self.get_holder_pubkeys().funding_pubkey }),
+ bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey() }),
+ bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
excess_data: Vec::new(),
};
return None;
}
- log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.channel_id()));
+ log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
Ok(a) => a,
Err(e) => {
self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
Some(msgs::AnnouncementSignatures {
- channel_id: self.channel_id(),
- short_channel_id: self.get_short_channel_id().unwrap(),
+ channel_id: self.context.channel_id(),
+ short_channel_id: self.context.get_short_channel_id().unwrap(),
node_signature: our_node_sig,
bitcoin_signature: our_bitcoin_sig,
})
let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
- if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.get_counterparty_node_id()).is_err() {
+ if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
return Err(ChannelError::Close(format!(
"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
- &announcement, self.get_counterparty_node_id())));
+ &announcement, self.context.get_counterparty_node_id())));
}
if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.counterparty_funding_pubkey()).is_err() {
return Err(ChannelError::Close(format!(
let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
- log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id()));
+ log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
remote_last_secret
} else {
- log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
+ log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
[0;32]
};
self.mark_awaiting_response();
msgs::ChannelReestablish {
- channel_id: self.channel_id(),
+ channel_id: self.context.channel_id(),
// The protocol has two different commitment number concepts - the "commitment
// transaction number", which starts from 0 and counts up, and the "revocation key
// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
&counterparty_commitment_txid, encode::serialize_hex(&self.get_funding_redeemscript()),
- log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.channel_id()));
+ log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
- encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
+ encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
- log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.channel_id()));
+ log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
}
}
// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
// return them to fail the payment.
let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
- let counterparty_node_id = self.get_counterparty_node_id();
+ let counterparty_node_id = self.context.get_counterparty_node_id();
for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
match htlc_update {
HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
_ => {}
}
}
- let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
+ let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
// returning a channel monitor update here would imply a channel monitor update before
// we even registered the channel monitor to begin with, which is invalid.
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
- Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+ Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
update_id: self.context.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
}))
let ref htlc = htlcs[$htlc_idx];
let htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
- chan.get_counterparty_selected_contest_delay().unwrap(),
+ chan.context.get_counterparty_selected_contest_delay().unwrap(),
&htlc, $opt_anchors, false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
let htlc_sighashtype = if $opt_anchors { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
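// Anchor-outputs channels sign HTLC transactions with SIGHASH_SINGLE|ANYONECANPAY so that
// additional fee inputs can be attached later; non-anchor channels sign with SIGHASH_ALL.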
let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
channel.get_holder_counterparty_selected_channel_reserve_satoshis();
ChannelDetails {
- channel_id: channel.channel_id(),
+ channel_id: channel.context.channel_id(),
counterparty: ChannelCounterparty {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
features: latest_features,
unspendable_punishment_reserve: to_remote_reserve_satoshis,
forwarding_info: channel.counterparty_forwarding_info(),
// Else `Channel::get_counterparty_htlc_minimum_msat` could return the
// default `0` value set by `Channel::new_outbound`.
outbound_htlc_minimum_msat: if channel.context.have_received_message() {
- Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
- outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
+ Some(channel.context.get_counterparty_htlc_minimum_msat()) } else { None },
+ outbound_htlc_maximum_msat: channel.context.get_counterparty_htlc_maximum_msat(),
},
- funding_txo: channel.get_funding_txo(),
+ funding_txo: channel.context.get_funding_txo(),
// Note that accept_channel (or open_channel) is always the first message, so
// `have_received_message` indicates that type negotiation has completed.
- channel_type: if channel.context.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
- short_channel_id: channel.get_short_channel_id(),
- outbound_scid_alias: if channel.context.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
- inbound_scid_alias: channel.latest_inbound_scid_alias(),
- channel_value_satoshis: channel.get_value_satoshis(),
+ channel_type: if channel.context.have_received_message() { Some(channel.context.get_channel_type().clone()) } else { None },
+ short_channel_id: channel.context.get_short_channel_id(),
+ outbound_scid_alias: if channel.context.is_usable() { Some(channel.context.outbound_scid_alias()) } else { None },
+ inbound_scid_alias: channel.context.latest_inbound_scid_alias(),
+ channel_value_satoshis: channel.context.get_value_satoshis(),
feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()),
unspendable_punishment_reserve: to_self_reserve_satoshis,
balance_msat: balance.balance_msat,
outbound_capacity_msat: balance.outbound_capacity_msat,
next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
next_outbound_htlc_minimum_msat: balance.next_outbound_htlc_minimum_msat,
- user_channel_id: channel.get_user_id(),
- confirmations_required: channel.minimum_depth(),
- confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
- force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
+ user_channel_id: channel.context.get_user_id(),
+ confirmations_required: channel.context.minimum_depth(),
+ confirmations: Some(channel.context.get_funding_tx_confirmations(best_block_height)),
+ force_close_spend_delay: channel.context.get_counterparty_selected_contest_delay(),
is_outbound: channel.context.is_outbound(),
is_channel_ready: channel.context.is_usable(),
is_usable: channel.context.is_live(),
is_public: channel.context.should_announce(),
- inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
- inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
- config: Some(channel.config()),
+ inbound_htlc_minimum_msat: Some(channel.context.get_holder_htlc_minimum_msat()),
+ inbound_htlc_maximum_msat: channel.context.get_holder_htlc_maximum_msat(),
+ config: Some(channel.context.config()),
}
}
}
macro_rules! update_maps_on_chan_removal {
($self: expr, $channel: expr) => {{
- $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
+ $self.id_to_peer.lock().unwrap().remove(&$channel.context.channel_id());
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
- if let Some(short_id) = $channel.get_short_channel_id() {
+ if let Some(short_id) = $channel.context.get_short_channel_id() {
short_to_chan_info.remove(&short_id);
} else {
// If the channel was never confirmed on-chain prior to its closure, remove the
// also don't want a counterparty to be able to trivially cause a memory leak by simply
// opening a million channels with us which are closed before we ever reach the funding
// stage.
- let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
+ let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.context.outbound_scid_alias());
debug_assert!(alias_removed);
}
- short_to_chan_info.remove(&$channel.outbound_scid_alias());
+ short_to_chan_info.remove(&$channel.context.outbound_scid_alias());
}}
}
log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
update_maps_on_chan_removal!($self, $channel);
let shutdown_res = $channel.force_shutdown(true);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
},
}
macro_rules! send_channel_ready {
($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
- node_id: $channel.get_counterparty_node_id(),
+ node_id: $channel.context.get_counterparty_node_id(),
msg: $channel_ready_msg,
});
// Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
- let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
- assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
+ let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
+ assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
- if let Some(real_scid) = $channel.get_short_channel_id() {
- let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
- assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
+ if let Some(real_scid) = $channel.context.get_short_channel_id() {
+ let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
+ assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
}
}}
macro_rules! emit_channel_pending_event {
($locked_events: expr, $channel: expr) => {
- if $channel.should_emit_channel_pending_event() {
+ if $channel.context.should_emit_channel_pending_event() {
$locked_events.push_back((events::Event::ChannelPending {
- channel_id: $channel.channel_id(),
- former_temporary_channel_id: $channel.temporary_channel_id(),
- counterparty_node_id: $channel.get_counterparty_node_id(),
- user_channel_id: $channel.get_user_id(),
- funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
+ channel_id: $channel.context.channel_id(),
+ former_temporary_channel_id: $channel.context.temporary_channel_id(),
+ counterparty_node_id: $channel.context.get_counterparty_node_id(),
+ user_channel_id: $channel.context.get_user_id(),
+ funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
}, None));
- $channel.set_channel_pending_event_emitted();
+ $channel.context.set_channel_pending_event_emitted();
}
}
}
macro_rules! emit_channel_ready_event {
($locked_events: expr, $channel: expr) => {
- if $channel.should_emit_channel_ready_event() {
- debug_assert!($channel.channel_pending_event_emitted());
+ if $channel.context.should_emit_channel_ready_event() {
+ debug_assert!($channel.context.channel_pending_event_emitted());
$locked_events.push_back((events::Event::ChannelReady {
- channel_id: $channel.channel_id(),
- user_channel_id: $channel.get_user_id(),
- counterparty_node_id: $channel.get_counterparty_node_id(),
- channel_type: $channel.get_channel_type().clone(),
+ channel_id: $channel.context.channel_id(),
+ user_channel_id: $channel.context.get_user_id(),
+ counterparty_node_id: $channel.context.get_counterparty_node_id(),
+ channel_type: $channel.context.get_channel_type().clone(),
}, None));
- $channel.set_channel_ready_event_emitted();
+ $channel.context.set_channel_ready_event_emitted();
}
}
}
let mut updates = $chan.monitor_updating_restored(&$self.logger,
&$self.node_signer, $self.genesis_hash, &$self.default_configuration,
$self.best_block.read().unwrap().height());
- let counterparty_node_id = $chan.get_counterparty_node_id();
+ let counterparty_node_id = $chan.context.get_counterparty_node_id();
let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
// We only send a channel_update in the case where we are just now sending a
// channel_ready and the channel is in a usable state. We may re-send a
} else { None };
let update_actions = $peer_state.monitor_update_blocked_actions
- .remove(&$chan.channel_id()).unwrap_or(Vec::new());
+ .remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
let htlc_forwards = $self.handle_channel_resumption(
&mut $peer_state.pending_msg_events, $chan, updates.raa,
$peer_state.pending_msg_events.push(upd);
}
- let channel_id = $chan.channel_id();
+ let channel_id = $chan.context.channel_id();
core::mem::drop($peer_state_lock);
core::mem::drop($per_peer_state_lock);
match $update_res {
ChannelMonitorUpdateStatus::InProgress => {
log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
- log_bytes!($chan.channel_id()[..]));
+ log_bytes!($chan.context.channel_id()[..]));
Ok(())
},
ChannelMonitorUpdateStatus::PermanentFailure => {
log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
- log_bytes!($chan.channel_id()[..]));
+ log_bytes!($chan.context.channel_id()[..]));
update_maps_on_chan_removal!($self, $chan);
let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
- "ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
- $chan.get_user_id(), $chan.force_shutdown(false),
+ "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
+ $chan.context.get_user_id(), $chan.force_shutdown(false),
$self.get_channel_update_for_broadcast(&$chan).ok()));
$remove;
res
};
let res = channel.get_open_channel(self.genesis_hash.clone());
- let temporary_channel_id = channel.channel_id();
+ let temporary_channel_id = channel.context.channel_id();
match peer_state.channel_by_id.entry(temporary_channel_id) {
hash_map::Entry::Occupied(_) => {
if cfg!(fuzzing) {
match channel.unbroadcasted_funding() {
Some(transaction) => {
pending_events_lock.push_back((events::Event::DiscardFunding {
- channel_id: channel.channel_id(), transaction
+ channel_id: channel.context.channel_id(), transaction
}, None));
},
None => {},
}
pending_events_lock.push_back((events::Event::ChannelClosed {
- channel_id: channel.channel_id(),
- user_channel_id: channel.get_user_id(),
+ channel_id: channel.context.channel_id(),
+ user_channel_id: channel.context.get_user_id(),
reason: closure_reason
}, None));
}
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id.clone()) {
hash_map::Entry::Occupied(mut chan_entry) => {
- let funding_txo_opt = chan_entry.get().get_funding_txo();
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
let their_features = &peer_state.latest_features;
let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
});
}
- Ok(chan.get_counterparty_node_id())
+ Ok(chan.context.get_counterparty_node_id())
}
fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
// we don't allow forwards outbound over them.
break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
}
- if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() {
+ if chan.context.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.context.outbound_scid_alias() {
// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
// "refuse to forward unless the SCID alias was used", so we pretend
// we don't have the channel here.
break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
}
}
- if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
+ if *outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
}
if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
action: msgs::ErrorAction::IgnoreError
});
}
- if chan.get_short_channel_id().is_none() {
+ if chan.context.get_short_channel_id().is_none() {
return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
}
- log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
+ log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.context.channel_id()));
self.get_channel_update_for_unicast(chan)
}
/// [`channel_update`]: msgs::ChannelUpdate
/// [`internal_closing_signed`]: Self::internal_closing_signed
fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
- log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
- let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
+ log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.context.channel_id()));
+ let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
};
self.get_channel_update_for_onion(short_channel_id, chan)
}
fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
- log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
- let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+ log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.context.channel_id()));
+ let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
let enabled = chan.context.is_usable() && match chan.channel_update_status() {
ChannelUpdateStatus::Enabled => true,
short_channel_id,
timestamp: chan.context.get_update_time_counter(),
flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
- cltv_expiry_delta: chan.get_cltv_expiry_delta(),
- htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
- htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
+ cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
+ htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
+ htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
- fee_proportional_millionths: chan.get_fee_proportional_millionths(),
+ fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
excess_data: Vec::new(),
};
// Panic on failure to signal LDK should be restarted to retry signing the `ChannelUpdate`.
if !chan.get().context.is_live() {
return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
}
- let funding_txo = chan.get().get_funding_txo().unwrap();
+ let funding_txo = chan.get().context.get_funding_txo().unwrap();
let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
htlc_cltv, HTLCSource::OutboundRoute {
path: path.clone(),
let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
.map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.context.channel_id(), chan.context.get_user_id(), chan.force_shutdown(true), None)
} else { unreachable!(); });
match funding_res {
Ok(funding_msg) => (funding_msg, chan),
mem::drop(peer_state_lock);
mem::drop(per_peer_state);
- let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id());
+ let _ = handle_error!(self, funding_res, chan.context.get_counterparty_node_id());
return Err(APIError::ChannelUnavailable {
err: "Signer refused to sign the initial commitment transaction".to_owned()
});
};
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
- node_id: chan.get_counterparty_node_id(),
+ node_id: chan.context.get_counterparty_node_id(),
msg,
});
- match peer_state.channel_by_id.entry(chan.channel_id()) {
+ match peer_state.channel_by_id.entry(chan.context.channel_id()) {
hash_map::Entry::Occupied(_) => {
panic!("Generated duplicate funding txid?");
},
hash_map::Entry::Vacant(e) => {
let mut id_to_peer = self.id_to_peer.lock().unwrap();
- if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+ if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
}
e.insert(chan);
let mut output_index = None;
let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
for (idx, outp) in tx.output.iter().enumerate() {
- if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+ if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() {
if output_index.is_some() {
return Err(APIError::APIMisuseError {
err: "Multiple outputs matched the expected script and value".to_owned()
}
for channel_id in channel_ids {
let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
- let mut config = channel.config();
+ let mut config = channel.context.config();
config.apply(config_update);
- if !channel.update_config(&config) {
+ if !channel.context.update_config(&config) {
continue;
}
if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
msg,
});
}
err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
})
}
- chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+ chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
},
None => return Err(APIError::ChannelUnavailable {
err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
failed_forwards.push((htlc_source, payment_hash,
HTLCFailReason::reason(failure_code, data),
- HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
+ HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id }
));
continue;
}
_ => {},
}
- chan.maybe_expire_prev_config();
+ chan.context.maybe_expire_prev_config();
if chan.should_disconnect_peer_awaiting_response() {
log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
// we're not leaking that we have a channel with the counterparty), otherwise we try to use
// an inbound SCID alias before the real SCID.
let scid_pref = if chan.context.should_announce() {
- chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
+ chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias())
} else {
- chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
+ chan.context.latest_inbound_scid_alias().or(chan.context.get_short_channel_id())
};
if let Some(scid) = scid_pref {
self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
let mut peer_state_lock = peer_state_opt.unwrap();
let peer_state = &mut *peer_state_lock;
if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
- let counterparty_node_id = chan.get().get_counterparty_node_id();
+ let counterparty_node_id = chan.get().context.get_counterparty_node_id();
let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
-> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
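// When present, the returned tuple is (short channel id or outbound SCID alias, funding
// outpoint, user_channel_id, pending HTLC forwards) for the resumed channel.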
log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
- log_bytes!(channel.channel_id()),
+ log_bytes!(channel.context.channel_id()),
if raa.is_some() { "an" } else { "no" },
if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
if funding_broadcastable.is_some() { "" } else { "not " },
let mut htlc_forwards = None;
- let counterparty_node_id = channel.get_counterparty_node_id();
+ let counterparty_node_id = channel.context.get_counterparty_node_id();
if !pending_forwards.is_empty() {
- htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
- channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
+ htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()),
+ channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards));
}
if let Some(msg) = channel_ready {
}
if accept_0conf {
channel.get_mut().set_0conf();
- } else if channel.get().get_channel_type().requires_zero_conf() {
+ } else if channel.get().context.get_channel_type().requires_zero_conf() {
let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.get().get_counterparty_node_id(),
+ node_id: channel.get().context.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{
msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
}
// channels per-peer we can accept channels from a peer with existing ones.
if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.get().get_counterparty_node_id(),
+ node_id: channel.get().context.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage{
msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
}
}
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: channel.get().get_counterparty_node_id(),
+ node_id: channel.get().context.get_counterparty_node_id(),
msg: channel.get_mut().accept_inbound_channel(user_channel_id),
});
}
) -> usize {
let mut num_unfunded_channels = 0;
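// Count inbound channels which still require confirmations (i.e. are not 0-conf) and whose
// funding transaction has not yet confirmed.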
for (_, chan) in peer.channel_by_id.iter() {
- if !chan.context.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
- chan.get_funding_tx_confirmations(best_block_height) == 0
+ if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+ chan.context.get_funding_tx_confirmations(best_block_height) == 0
{
num_unfunded_channels += 1;
}
},
Ok(res) => res
};
- match peer_state.channel_by_id.entry(channel.channel_id()) {
+ match peer_state.channel_by_id.entry(channel.context.channel_id()) {
hash_map::Entry::Occupied(_) => {
self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
},
hash_map::Entry::Vacant(entry) => {
if !self.default_configuration.manually_accept_inbound_channels {
- if channel.get_channel_type().requires_zero_conf() {
+ if channel.context.get_channel_type().requires_zero_conf() {
return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
}
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
counterparty_node_id: counterparty_node_id.clone(),
funding_satoshis: msg.funding_satoshis,
push_msat: msg.push_msat,
- channel_type: channel.get_channel_type().clone(),
+ channel_type: channel.context.get_channel_type().clone(),
}, None));
}
match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
hash_map::Entry::Occupied(mut chan) => {
try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
- (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+ (chan.get().context.get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
}
Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
},
hash_map::Entry::Vacant(e) => {
- match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
+ match self.id_to_peer.lock().unwrap().entry(chan.context.channel_id()) {
hash_map::Entry::Occupied(_) => {
return Err(MsgHandleErrInternal::send_err_msg_no_close(
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
funding_msg.channel_id))
},
hash_map::Entry::Vacant(i_e) => {
- i_e.insert(chan.get_counterparty_node_id());
+ i_e.insert(chan.context.get_counterparty_node_id());
}
}
hash_map::Entry::Occupied(mut chan) => {
let monitor = try_chan_entry!(self,
chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
- let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+ let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
// We weren't able to watch the channel to begin with, so no updates should be made on
let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
if let Some(announcement_sigs) = announcement_sigs_opt {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+ log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().context.channel_id()));
peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id.clone(),
msg: announcement_sigs,
// counterparty's announcement_signatures. Thus, we only bother to send a
// channel_update here if the channel is not public, i.e. we're not sending an
// announcement_signatures.
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+ log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().context.channel_id()));
if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: counterparty_node_id.clone(),
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
}
- let funding_txo_opt = chan_entry.get().get_funding_txo();
+ let funding_txo_opt = chan_entry.get().context.get_funding_txo();
let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
dropped_htlcs = htlcs;
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let funding_txo = chan.get().get_funding_txo();
+ let funding_txo = chan.get().context.get_funding_txo();
let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
if let Some(monitor_update) = monitor_update_opt {
let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- let funding_txo = chan.get().get_funding_txo();
+ let funding_txo = chan.get().context.get_funding_txo();
let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
let res = if let Some(monitor_update) = monitor_update_opt {
let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(chan_id) {
hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+ if chan.get().context.get_counterparty_node_id() != *counterparty_node_id {
if chan.get().context.should_announce() {
// If the announcement is about a channel of ours which is public, some
// other peer may simply be forwarding all its gossip to us. Don't provide
}
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
}
- let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+ let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..];
let msg_from_node_one = msg.contents.flags & 1 == 0;
if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersist);
// they have the latest channel parameters.
if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
- node_id: chan.get().get_counterparty_node_id(),
+ node_id: chan.get().context.get_counterparty_node_id(),
msg,
});
}
};
self.issue_channel_close_events(&chan, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: chan.get_counterparty_node_id(),
+ node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
},
});
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
- let counterparty_node_id = chan.get_counterparty_node_id();
- let funding_txo = chan.get_funding_txo();
+ let counterparty_node_id = chan.context.get_counterparty_node_id();
+ let funding_txo = chan.context.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) =
chan.maybe_free_holding_cell_htlcs(&self.logger);
if !holding_cell_failed_htlcs.is_empty() {
if let Some(msg) = msg_opt {
has_update = true;
pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: chan.get_counterparty_node_id(), msg,
+ node_id: chan.context.get_counterparty_node_id(), msg,
});
}
if let Some(tx) = tx_opt {
Err(e) => {
has_update = true;
let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
- handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
!close_channel
}
}
}
if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
- debug_assert_eq!(chan.get().get_funding_txo().unwrap(), channel_funding_outpoint);
+ debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
log_bytes!(&channel_funding_outpoint.to_channel_id()[..]));
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for chan in peer_state.channel_by_id.values() {
- if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+ if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
res.push((funding_txo.txid, Some(block_hash)));
}
}
let _persistence_guard = PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock,
&self.persistence_notifier, || -> NotifyOption { NotifyOption::DoPersist });
self.do_chain_event(None, |channel| {
- if let Some(funding_txo) = channel.get_funding_txo() {
+ if let Some(funding_txo) = channel.context.get_funding_txo() {
if funding_txo.txid == *txid {
channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
} else { Ok((None, Vec::new(), None)) }
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
- HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
+ HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
}
if let Some(channel_ready) = channel_ready_opt {
send_channel_ready!(self, pending_msg_events, channel, channel_ready);
if channel.context.is_usable() {
- log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
+ log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.context.channel_id()));
if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
msg,
});
}
} else {
- log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
+ log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.context.channel_id()));
}
}
}
if let Some(announcement_sigs) = announcement_sigs {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
+ log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.context.channel_id()));
pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
msg: announcement_sigs,
});
if let Some(height) = height_opt {
}
}
if channel.is_our_channel_ready() {
- if let Some(real_scid) = channel.get_short_channel_id() {
+ if let Some(real_scid) = channel.context.get_short_channel_id() {
// If we sent a 0conf channel_ready, and now have an SCID, we add it
// to the short_to_chan_info map here. Note that we check whether we
// can relay using the real SCID at relay-time (i.e.
// un-confirmed we force-close the channel, ensuring short_to_chan_info
// is always consistent.
let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
- let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
- assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
+ let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
+ assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
}
let reason_message = format!("{}", reason);
self.issue_channel_close_events(channel, reason);
pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: channel.get_counterparty_node_id(),
+ node_id: channel.context.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
- channel_id: channel.channel_id(),
+ channel_id: channel.context.channel_id(),
data: reason_message,
} },
});
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|_, chan| {
- let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
+ let retain = if chan.context.get_counterparty_node_id() == *counterparty_node_id {
if !chan.context.have_received_message() {
// If we created this (outbound) channel while we were disconnected from the
// peer we probably failed to send the open_channel message, which is now
false
} else {
pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
- node_id: chan.get_counterparty_node_id(),
+ node_id: chan.context.get_counterparty_node_id(),
msg: chan.get_channel_reestablish(&self.logger),
});
true
}
} else { true };
- if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
+ if retain && chan.context.get_counterparty_node_id() != *counterparty_node_id {
if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
}
number_of_channels += peer_state.channel_by_id.len();
for (_, channel) in peer_state.channel_by_id.iter() {
- if !channel.is_funding_initiated() {
+ if !channel.context.is_funding_initiated() {
unfunded_channels += 1;
}
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (_, channel) in peer_state.channel_by_id.iter() {
- if channel.is_funding_initiated() {
+ if channel.context.is_funding_initiated() {
channel.write(writer)?;
}
}
pub default_config: UserConfig,
/// A map from channel funding outpoints to ChannelMonitors for those channels (ie
/// value.get_funding_txo() should be the key).
///
/// If a monitor is inconsistent with the channel state during deserialization the channel will
/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
))?;
- let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
if channel.get_latest_complete_monitor_update_id() > monitor.get_latest_update_id() {
// If the channel is ahead of the monitor, return InvalidValue:
log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
- log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
+ log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.get_latest_complete_monitor_update_id());
log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
- log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
+ log_bytes!(channel.context.channel_id()), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
}
failed_htlcs.append(&mut new_failed_htlcs);
channel_closures.push_back((events::Event::ChannelClosed {
- channel_id: channel.channel_id(),
- user_channel_id: channel.get_user_id(),
+ channel_id: channel.context.channel_id(),
+ user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::OutdatedChannelManager
}, None));
for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
// backwards leg of the HTLC will simply be rejected.
log_info!(args.logger,
"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
- log_bytes!(channel.channel_id()), log_bytes!(payment_hash.0));
- failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
+ log_bytes!(channel.context.channel_id()), log_bytes!(payment_hash.0));
+ failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
}
} else {
log_info!(args.logger, "Successfully loaded channel {} at update_id {} against monitor at update id {}",
- log_bytes!(channel.channel_id()), channel.context.get_latest_monitor_update_id(),
+ log_bytes!(channel.context.channel_id()), channel.context.get_latest_monitor_update_id(),
monitor.get_latest_update_id());
channel.complete_all_mon_updates_through(monitor.get_latest_update_id());
- if let Some(short_channel_id) = channel.get_short_channel_id() {
- short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
+ if let Some(short_channel_id) = channel.context.get_short_channel_id() {
+ short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
- if channel.is_funding_initiated() {
- id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
+ if channel.context.is_funding_initiated() {
+ id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
}
- match peer_channels.entry(channel.get_counterparty_node_id()) {
+ match peer_channels.entry(channel.context.get_counterparty_node_id()) {
hash_map::Entry::Occupied(mut entry) => {
let by_id_map = entry.get_mut();
- by_id_map.insert(channel.channel_id(), channel);
+ by_id_map.insert(channel.context.channel_id(), channel);
},
hash_map::Entry::Vacant(entry) => {
let mut by_id_map = HashMap::new();
- by_id_map.insert(channel.channel_id(), channel);
+ by_id_map.insert(channel.context.channel_id(), channel);
entry.insert(by_id_map);
}
}
// safely discard the channel.
let _ = channel.force_shutdown(false);
channel_closures.push_back((events::Event::ChannelClosed {
- channel_id: channel.channel_id(),
- user_channel_id: channel.get_user_id(),
+ channel_id: channel.context.channel_id(),
+ user_channel_id: channel.context.get_user_id(),
reason: ClosureReason::DisconnectedPeer,
}, None));
} else {
- log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
+ log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));
log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
let peer_state = peer_mtx.lock().unwrap();
for (_, chan) in peer_state.channel_by_id.iter() {
for update in chan.uncompleted_unblocked_mon_updates() {
- if let Some(funding_txo) = chan.get_funding_txo() {
+ if let Some(funding_txo) = chan.context.get_funding_txo() {
log_trace!(args.logger, "Replaying ChannelMonitorUpdate {} for channel {}",
update.update_id, log_bytes!(funding_txo.to_channel_id()));
pending_background_events.push(
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
- if chan.outbound_scid_alias() == 0 {
+ if chan.context.outbound_scid_alias() == 0 {
let mut outbound_scid_alias;
loop {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias
.get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
}
- chan.set_outbound_scid_alias(outbound_scid_alias);
- } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
+ chan.context.set_outbound_scid_alias(outbound_scid_alias);
+ } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
// Note that in rare cases it's possible to hit this while reading an older
// channel if we just happened to pick a colliding outbound alias above.
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+ log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
if chan.context.is_usable() {
- if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
+ if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
// Note that in rare cases it's possible to hit this while reading an older
// channel if we just happened to pick a colliding outbound alias above.
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
+ log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
}