+ /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
+ pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
+ if self.channel_state & (ChannelState::FundingCreated as u32) != 0 {
+ self.funding_transaction.clone()
+ } else {
+ None
+ }
+ }
+
+ /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
+ /// shutdown of this channel - no more calls into this Channel may be made afterwards except
+ /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
+ /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
+ /// immediately (others we will have to allow to time out).
+ pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+ // Note that we MUST only generate a monitor update that indicates force-closure - we're
+ // called during initialization prior to the chain_monitor in the encompassing ChannelManager
+ // being fully configured in some cases. Thus, it's likely any monitor events we generate will
+ // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
+ assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
+
+ // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
+ // return them to fail the payment.
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+ let counterparty_node_id = self.get_counterparty_node_id();
+ for htlc_update in self.holding_cell_htlc_updates.drain(..) {
+ match htlc_update {
+ HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
+ dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
+ },
+ _ => {}
+ }
+ }
+ let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
+ // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
+ // returning a channel monitor update here would imply a channel monitor update before
+ // we even registered the channel monitor to begin with, which is invalid.
+ // Thus, if we aren't actually at a point where we could conceivably broadcast the
+ // funding transaction, don't return a monitor update at all, so no monitor update is
+ // ever provided to the user for a monitor that was never registered.
+ // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
+ if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+ self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
+ Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+ }))
+ } else { None }
+ } else { None };
+
+ self.channel_state = ChannelState::ShutdownComplete as u32;
+ self.update_time_counter += 1;
+ (monitor_update, dropped_outbound_htlcs)
+ }
+}
+
+// Internal utility functions for channels
+
+ /// Returns the value to use for `holder_max_htlc_value_in_flight_msat`, computed in msat as a
+ /// percentage of `channel_value_satoshis`, set through
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
+///
+/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
+///
+/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
+fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
+ let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
+ 1
+ } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
+ 100
+ } else {
+ config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
+ };
+ channel_value_satoshis * 10 * configured_percent
+}
+
+/// Returns a minimum channel reserve value the remote needs to maintain,
+/// required by us according to the configured or default
+/// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
+///
+/// Guaranteed to return a value no larger than channel_value_satoshis
+///
+ /// This is used both for outbound and inbound channels and has a lower bound
+/// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
+pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
+ let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
+ cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
+}
+
+/// This is for legacy reasons, present for forward-compatibility.
+ /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
+ /// from storage. Hence, we use this function to avoid persisting default values of
+/// `holder_selected_channel_reserve_satoshis` for channels into storage.
+pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
+ let (q, _) = channel_value_satoshis.overflowing_div(100);
+ cmp::min(channel_value_satoshis, cmp::max(q, 1000))
+}
+
+// Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
+// Note that num_htlcs should not include dust HTLCs.
+#[inline]
+fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
+ feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
+}
+
+// Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
+// Note that num_htlcs should not include dust HTLCs.
+fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, opt_anchors: bool) -> u64 {
+ // Note that we need to divide before multiplying to round properly,
+ // since the lowest denomination of bitcoin on-chain is the satoshi.
+ (commitment_tx_base_weight(opt_anchors) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
+}
+
+// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
+// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
+// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
+// inbound channel.
+//
+// Holder designates channel data owned for the benefit of the user client.
+ // Counterparty designates channel data owned by another channel participant entity.
+pub(super) struct Channel<Signer: ChannelSigner> {
+ pub context: ChannelContext<Signer>,
+}
+
+#[cfg(any(test, fuzzing))]
+struct CommitmentTxInfoCached {
+ fee: u64,
+ total_pending_htlcs: usize,
+ next_holder_htlc_id: u64,
+ next_counterparty_htlc_id: u64,
+ feerate: u32,
+}
+
+impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
+ fn check_remote_fee<F: Deref, L: Deref>(fee_estimator: &LowerBoundedFeeEstimator<F>,
+ feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L)
+ -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
+ {
+ // We only bound the fee updates on the upper side to prevent completely absurd feerates,
+ // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
+ // We generally don't care too much if they set the feerate to something very high, but it
+ // could result in the channel being useless due to everything being dust.
+ let upper_limit = cmp::max(250 * 25,
+ fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
+ if feerate_per_kw as u64 > upper_limit {
+ return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
+ }
+ let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
+ // Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
+ // occasional issues with feerate disagreements between an initiator that wants a feerate
+ // of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
+ // sat/kw before the comparison here.
+ if feerate_per_kw + 250 < lower_limit {
+ if let Some(cur_feerate) = cur_feerate_per_kw {
+ if feerate_per_kw > cur_feerate {
+ log_warn!(logger,
+ "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
+ cur_feerate, feerate_per_kw);
+ return Ok(());
+ }
+ }
+ return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {} (- 250)", feerate_per_kw, lower_limit)));
+ }
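+ // For illustration (hypothetical estimates): with a "High Priority" estimate of
+ // 2_000 sat/kw the upper limit above is max(250 * 25, 2_000 * 10) = 20_000 sat/kw, and
+ // with a "Background" estimate of 253 sat/kw any feerate of at least 3 sat/kw passes the
+ // lower check thanks to the 250 sat/kw allowance.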
+ Ok(())
+ }
+
+ #[inline]
+ fn get_closing_scriptpubkey(&self) -> Script {
+ // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
+ // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
+ // outside of those situations will panic.
+ self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+ }
+
+ #[inline]
+ fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
+ let mut ret =
+ (4 + // version
+ 1 + // input count
+ 36 + // prevout
+ 1 + // script length (0)
+ 4 + // sequence
+ 1 + // output count
+ 4 // lock time
+ )*4 + // * 4 for non-witness parts
+ 2 + // witness marker and flag
+ 1 + // witness element count
+ 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
+ self.context.get_funding_redeemscript().len() as u64 + // funding witness script
+ 2*(1 + 71); // two signatures + sighash type flags
+ if let Some(spk) = a_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ if let Some(spk) = b_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
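+ // For illustration (hypothetical scripts): with a 71-byte funding redeemscript and two
+ // 22-byte P2WPKH outputs, the non-witness parts weigh (51 + 2*31)*4 = 452 and the
+ // witness parts add 2 + 1 + 4 + 71 + 2*(1 + 71) = 222, i.e. 674 weight in total.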
+ ret
+ }
+
+ #[inline]
+ fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
+ assert!(self.context.pending_inbound_htlcs.is_empty());
+ assert!(self.context.pending_outbound_htlcs.is_empty());
+ assert!(self.context.pending_update_fee.is_none());
+
+ let mut total_fee_satoshis = proposed_total_fee_satoshis;
+ let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
+ let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
+
+ if value_to_holder < 0 {
+ assert!(self.context.is_outbound());
+ total_fee_satoshis += (-value_to_holder) as u64;
+ } else if value_to_counterparty < 0 {
+ assert!(!self.context.is_outbound());
+ total_fee_satoshis += (-value_to_counterparty) as u64;
+ }
+
+ if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
+ value_to_counterparty = 0;
+ }
+
+ if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
+ value_to_holder = 0;
+ }
+
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ let holder_shutdown_script = self.get_closing_scriptpubkey();
+ let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
+ let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
+
+ let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
+ (closing_transaction, total_fee_satoshis)
+ }
+
+ fn funding_outpoint(&self) -> OutPoint {
+ self.context.channel_transaction_parameters.funding_outpoint.unwrap()
+ }
+
+ /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
+ /// entirely.
+ ///
+ /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
+ /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
+ ///
+ /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
+ /// disconnected).
+ pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
+ (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
+ where L::Target: Logger {
+ // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
+ // (see equivalent if condition there).
+ assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+ let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
+ let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+ self.context.latest_monitor_update_id = mon_update_id;
+ if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
+ assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
+ }
+ }
+
+ fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+ // Either ChannelReady got set (which means it won't be unset) or there is no way any
+ // caller thought we could have something claimed (because we wouldn't have accepted an
+ // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
+ // either.
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+
+ let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
+
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
+
+ let mut pending_idx = core::usize::MAX;
+ let mut htlc_value_msat = 0;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ assert_eq!(htlc.payment_hash, payment_hash_calc);
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ }
+ return UpdateFulfillFetch::DuplicateClaim {};
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ // Don't return in release mode here so that we can update channel_monitor
+ }
+ }
+ pending_idx = idx;
+ htlc_value_msat = htlc.amount_msat;
+ break;
+ }
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+ // this is simply a duplicate claim, not previously failed and we lost funds.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
+
+ // Now update local state:
+ //
+ // We have to put the payment_preimage in the channel_monitor right away here to ensure we
+ // can claim it even if the channel hits the chain before we see their next commitment.
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage_arg.clone(),
+ }],
+ };
+
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ // Note that this condition is the same as the assertion in
+ // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
+ // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
+ // do not get into this branch.
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ // Make sure we don't leave latest_monitor_update_id incremented here:
+ self.context.latest_monitor_update_id -= 1;
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
+ // TODO: We may actually be able to switch to a fulfill here, though it's
+ // rare enough it may not be worth the complexity burden.
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ },
+ _ => {}
+ }
+ }
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
+ payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
+ });
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ if let InboundHTLCState::Committed = htlc.state {
+ } else {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
+ }
+
+ UpdateFulfillFetch::NewClaim {
+ monitor_update,
+ htlc_value_msat,
+ msg: Some(msgs::UpdateFulfillHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc_id_arg,
+ payment_preimage: payment_preimage_arg,
+ }),
+ }
+ }
+
+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+ let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+ match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+ // Even if we aren't supposed to let new monitor updates with commitment state
+ // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+ // matter what. Sadly, to push a new monitor update which flies before others
+ // already queued, we have to insert it into the pending queue and update the
+ // update_ids of all the following monitors.
+ let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
+ // to be strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ self.context.pending_monitor_updates.len() - 1
+ } else {
+ let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
+ .unwrap_or(self.context.pending_monitor_updates.len());
+ let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
+ .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+ monitor_update.update_id = new_mon_id;
+ self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+ held_update.update.update_id += 1;
+ }
+ if msg.is_some() {
+ debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+ let update = self.build_commitment_no_status_check(logger);
+ self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update, blocked: true,
+ });
+ }
+ insert_pos
+ };
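+ // For illustration (hypothetical ids): if the queue already holds blocked updates with
+ // ids [5, 6] and the new preimage update was built with id 7, it is inserted at the
+ // front with id 5 and the blocked updates are renumbered to 6 and 7, keeping update_ids
+ // strictly increasing by one.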
+ self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+ UpdateFulfillCommitFetch::NewClaim {
+ monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
+ .expect("We just pushed the monitor update").update,
+ htlc_value_msat,
+ }
+ },
+ UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
+ }
+ }
+
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(())`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
+ -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, err_packet, true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }
+
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
+ -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fail an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
+
+ let mut pending_idx = core::usize::MAX;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ }
+ return Ok(None);
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
+ }
+ }
+ pending_idx = idx;
+ }
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+ // is simply a duplicate fail, not previously failed and we failed-back too early.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
+
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
+ force_holding_cell = true;
+ }
+
+ // Now update local state:
+ if force_holding_cell {
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ }
+ },
+ _ => {}
+ }
+ }
+ log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
+ htlc_id: htlc_id_arg,
+ err_packet,
+ });
+ return Ok(None);
+ }
+
+ log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+ }
+
+ Ok(Some(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc_id_arg,
+ reason: err_packet
+ }))
+ }
+
+ // Message handlers:
+
+ /// Handles a funding_signed message from the remote end.
+ /// If this call is successful, broadcast the funding transaction (and not before!)
+ pub fn funding_signed<SP: Deref, L: Deref>(
+ &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
+ ) -> Result<ChannelMonitor<Signer>, ChannelError>
+ where
+ SP::Target: SignerProvider<Signer = Signer>,
+ L::Target: Logger
+ {
+ if !self.context.is_outbound() {
+ return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
+ }
+ if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
+ return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
+ }
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ }
+
+ let funding_script = self.context.get_funding_redeemscript();
+
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+ let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+ {
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
+ }
+ }
+
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
+
+ self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
+ .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+
+
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let funding_txo = self.context.get_funding_txo().unwrap();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+ let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+ monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
+ &self.context.destination_script, (funding_txo, funding_txo_script),
+ &self.context.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.context.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx, best_block, self.context.counterparty_node_id);
+
+ channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
+
+ assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We haven't had any monitor(s) yet, so no update can be in progress!
+ self.context.channel_state = ChannelState::FundingSent as u32;
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+ log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
+
+ let need_channel_ready = self.check_get_channel_ready(0).is_some();
+ self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ Ok(channel_monitor)
+ }
+
+ /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
+ /// and the channel is now usable (and public), this may generate an announcement_signatures to
+ /// reply with.
+ pub fn channel_ready<NS: Deref, L: Deref>(
+ &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
+ user_config: &UserConfig, best_block: &BestBlock, logger: &L
+ ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ self.context.workaround_lnd_bug_4006 = Some(msg.clone());
+ return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
+ }
+
+ if let Some(scid_alias) = msg.short_channel_id_alias {
+ if Some(scid_alias) != self.context.short_channel_id {
+ // The scid alias provided can be used to route payments *from* our counterparty,
+ // i.e. can be used for inbound payments and provided in invoices, but is not used
+ // when routing outbound payments.
+ self.context.latest_inbound_scid_alias = Some(scid_alias);
+ }
+ }
+
+ let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
+
+ if non_shutdown_state == ChannelState::FundingSent as u32 {
+ self.context.channel_state |= ChannelState::TheirChannelReady as u32;
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+ self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+ self.context.update_time_counter += 1;
+ } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
+ // If we reconnected before sending our `channel_ready` they may still resend theirs:
+ (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
+ (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
+ {
+ // They probably disconnected/reconnected and re-sent the channel_ready, which is
+ // required, or they're sending a fresh SCID alias.
+ let expected_point =
+ if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
+ // If they haven't ever sent an updated point, the point they send should match
+ // the current one.
+ self.context.counterparty_cur_commitment_point
+ } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
+ // If we've advanced the commitment number once, the second commitment point is
+ // at `counterparty_prev_commitment_point`, which is not yet revoked.
+ debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
+ self.context.counterparty_prev_commitment_point
+ } else {
+ // If they have sent updated points, channel_ready is always supposed to match
+ // their "first" point, which we re-derive here.
+ Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
+ &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
+ ).expect("We already advanced, so previous secret keys should have been validated already")))
+ };
+ if expected_point != Some(msg.next_per_commitment_point) {
+ return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+ }
+ return Ok(None);
+ } else {
+ return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
+ }
+
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+
+ log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
+
+ Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
+ }
+
+ pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
+ where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
+ // We can't accept HTLCs sent after we've sent a shutdown.
+ let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
+ if local_sent_shutdown {
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
+ }
+ // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
+ let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);