+ // First check the channel type is known, failing before we do anything else if we don't
+ // support this channel type.
+ let channel_type = if let Some(channel_type) = &msg.channel_type {
+ if channel_type.supports_any_optional_bits() {
+ return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+ }
+
+ // We only support the channel types defined by the `ChannelManager` in
+ // `provided_channel_type_features`. The channel type must always support
+ // `static_remote_key`.
+ if !channel_type.requires_static_remote_key() {
+ return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+ }
+ // Make sure we support all of the features behind the channel type.
+ if !channel_type.is_subset(our_supported_features) {
+ return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+ }
+ if channel_type.requires_scid_privacy() && announced_channel {
+ return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+ }
+ channel_type.clone()
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ channel_type
+ };
+ let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
+
+ let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.funding_pubkey,
+ revocation_basepoint: msg.revocation_basepoint,
+ payment_point: msg.payment_point,
+ delayed_payment_basepoint: msg.delayed_payment_basepoint,
+ htlc_basepoint: msg.htlc_basepoint
+ };
+
+ if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
+ return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+ }
+
+ // Check sanity of message fields:
+ if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
+ return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
+ }
+ if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
+ }
+ if msg.channel_reserve_satoshis > msg.funding_satoshis {
+ return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
+ }
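+ // Channel value net of the counterparty-selected reserve, in millisatoshis.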
+ let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
+ if msg.push_msat > full_channel_value_msat {
+ return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
+ }
+ if msg.dust_limit_satoshis > msg.funding_satoshis {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
+ }
+ if msg.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ }
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
+
+ let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ if msg.to_self_delay > max_counterparty_selected_contest_delay {
+ return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
+ }
+ if msg.max_accepted_htlcs < 1 {
+ return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ }
+ if msg.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+ }
+
+ // Now check against optional parameters as set by config...
+ if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
+ return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
+ }
+ if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
+ }
+ if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
+ }
+ if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
+ }
+ if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
+ }
+ if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+
+ // Convert things into internal flags and prep our state:
+
+ if config.channel_handshake_limits.force_announced_channel_preference {
+ if config.channel_handshake_config.announced_channel != announced_channel {
+ return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
+ }
+ }
+
+ let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
+ if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ // Protocol level safety check in place, although it should never happen because
+ // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
+ }
+ if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
+ msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+ }
+ if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
+ return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+ }
+
+ // check if the funder's amount for the initial commitment tx is sufficient
+ // for full fee payment plus a few HTLCs to ensure the channel will be useful.
+ let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
+ let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
+ if funders_amount_msat / 1000 < commitment_tx_fee {
+ return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
+ }
+
+ let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
+ // While it's reasonable for us to not meet the channel reserve initially (if they don't
+ // want to push much to us), our counterparty should always have more than our reserve.
+ if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
+ }
+
+ let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+ match &msg.shutdown_scriptpubkey {
+ &Some(ref script) => {
+ // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+ if script.len() == 0 {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+ }
+ Some(script.clone())
+ }
+ },
+ // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel
+ &None => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
+
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+ }
+ } else { None };
+
+ if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+ if !shutdown_scriptpubkey.is_compatible(&their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ }
+ }
+
+ let destination_script = match signer_provider.get_destination_script() {
+ Ok(script) => script,
+ Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+ };
+
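+ // Randomize the secp256k1 context to help protect against side-channel attacks.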
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+
+ let chan = Channel {
+ context: ChannelContext {
+ user_id,
+
+ config: LegacyChannelConfig {
+ options: config.channel_config.clone(),
+ announced_channel,
+ commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+ },
+
+ prev_config: None,
+
+ inbound_handshake_limits_override: None,
+
+ temporary_channel_id: Some(msg.temporary_channel_id),
+ channel_id: msg.temporary_channel_id,
+ channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
+ announcement_sigs_state: AnnouncementSigsState::NotSent,
+ secp_ctx,
+
+ latest_monitor_update_id: 0,
+
+ holder_signer,
+ shutdown_scriptpubkey,
+ destination_script,
+
+ cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ value_to_self_msat: msg.push_msat,
+
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
+ holding_cell_htlc_updates: Vec::new(),
+ pending_update_fee: None,
+ holding_cell_update_fee: None,
+ next_holder_htlc_id: 0,
+ next_counterparty_htlc_id: 0,
+ update_time_counter: 1,
+
+ resend_order: RAACommitmentOrder::CommitmentFirst,
+
+ monitor_pending_channel_ready: false,
+ monitor_pending_revoke_and_ack: false,
+ monitor_pending_commitment_signed: false,
+ monitor_pending_forwards: Vec::new(),
+ monitor_pending_failures: Vec::new(),
+ monitor_pending_finalized_fulfills: Vec::new(),
+
+ #[cfg(debug_assertions)]
+ holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+ #[cfg(debug_assertions)]
+ counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+
+ last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw: None,
+
+ inbound_awaiting_accept: true,
+
+ funding_tx_confirmed_in: None,
+ funding_tx_confirmation_height: 0,
+ short_channel_id: None,
+ channel_creation_height: current_chain_height,
+
+ feerate_per_kw: msg.feerate_per_kw,
+ channel_value_satoshis: msg.funding_satoshis,
+ counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
+ holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+ counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
+ holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
+ counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
+ holder_selected_channel_reserve_satoshis,
+ counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
+ holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+ counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
+ holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+ minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
+
+ counterparty_forwarding_info: None,
+
+ channel_transaction_parameters: ChannelTransactionParameters {
+ holder_pubkeys: pubkeys,
+ holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+ is_outbound_from_holder: false,
+ counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ }),
+ funding_outpoint: None,
+ opt_anchors: if opt_anchors { Some(()) } else { None },
+ opt_non_zero_fee_anchors: None
+ },
+ funding_transaction: None,
+
+ counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
+ counterparty_prev_commitment_point: None,
+ counterparty_node_id,
+
+ counterparty_shutdown_scriptpubkey,
+
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
+
+ channel_update_status: ChannelUpdateStatus::Enabled,
+ closing_signed_in_flight: false,
+
+ announcement_sigs: None,
+
+ #[cfg(any(test, fuzzing))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, fuzzing))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+
+ workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
+
+ latest_inbound_scid_alias: None,
+ outbound_scid_alias,
+
+ channel_pending_event_emitted: false,
+ channel_ready_event_emitted: false,
+
+ #[cfg(any(test, fuzzing))]
+ historical_inbound_htlc_fulfills: HashSet::new(),
+
+ channel_type,
+ channel_keys_id,
+
+ pending_monitor_updates: Vec::new(),
+ }
+ };
+
+ Ok(chan)
+ }
+
+ #[inline]
+ fn get_closing_scriptpubkey(&self) -> Script {
+ // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
+ // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
+ // outside of those situations will panic.
+ self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+ }
+
+ #[inline]
+ fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
+ let mut ret =
+ (4 + // version
+ 1 + // input count
+ 36 + // prevout
+ 1 + // script length (0)
+ 4 + // sequence
+ 1 + // output count
+ 4 // lock time
+ )*4 + // * 4 for non-witness parts
+ 2 + // witness marker and flag
+ 1 + // witness element count
+ 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
+ self.context.get_funding_redeemscript().len() as u64 + // funding witness script
+ 2*(1 + 71); // two signatures + sighash type flags
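+ // For example, each P2WPKH output (22-byte scriptpubkey) below adds (8 + 1 + 22) * 4 = 124 weight units.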
+ if let Some(spk) = a_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ if let Some(spk) = b_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ ret
+ }
+
+ #[inline]
+ fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
+ assert!(self.context.pending_inbound_htlcs.is_empty());
+ assert!(self.context.pending_outbound_htlcs.is_empty());
+ assert!(self.context.pending_update_fee.is_none());
+
+ let mut total_fee_satoshis = proposed_total_fee_satoshis;
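+ // The channel funder (the outbound party) pays the closing transaction fee, so deduct it from their balance.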
+ let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
+ let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
+
+ if value_to_holder < 0 {
+ assert!(self.context.is_outbound());
+ total_fee_satoshis += (-value_to_holder) as u64;
+ } else if value_to_counterparty < 0 {
+ assert!(!self.context.is_outbound());
+ total_fee_satoshis += (-value_to_counterparty) as u64;
+ }
+
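+ // Trim outputs at or below our dust limit (the counterparty's output may also be skipped explicitly).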
+ if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
+ value_to_counterparty = 0;
+ }
+
+ if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
+ value_to_holder = 0;
+ }
+
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ let holder_shutdown_script = self.get_closing_scriptpubkey();
+ let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
+ let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
+
+ let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
+ (closing_transaction, total_fee_satoshis)
+ }
+
+ fn funding_outpoint(&self) -> OutPoint {
+ self.context.channel_transaction_parameters.funding_outpoint.unwrap()
+ }
+
+ /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
+ /// entirely.
+ ///
+ /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
+ /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
+ ///
+ /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
+ /// disconnected).
+ pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
+ (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
+ where L::Target: Logger {
+ // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
+ // (see equivalent if condition there).
+ assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+ let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
+ let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+ self.context.latest_monitor_update_id = mon_update_id;
+ if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
+ assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
+ }
+ }
+
+ fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+ // Either ChannelReady got set (which means it won't be unset) or there is no way any
+ // caller thought we could have something claimed (because we wouldn't have accepted an
+ // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
+ // either.
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+
+ let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
+
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
+
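+ // usize::MAX is used as a sentinel meaning "no matching pending inbound HTLC was found".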
+ let mut pending_idx = core::usize::MAX;
+ let mut htlc_value_msat = 0;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ assert_eq!(htlc.payment_hash, payment_hash_calc);
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ }
+ return UpdateFulfillFetch::DuplicateClaim {};
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ // Don't return in release mode here so that we can update channel_monitor
+ }
+ }
+ pending_idx = idx;
+ htlc_value_msat = htlc.amount_msat;
+ break;
+ }
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+ // this is simply a duplicate claim, not previously failed and we lost funds.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
+
+ // Now update local state:
+ //
+ // We have to put the payment_preimage in the channel_monitor right away here to ensure we
+ // can claim it even if the channel hits the chain before we see their next commitment.
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage_arg.clone(),
+ }],
+ };
+
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ // Note that this condition is the same as the assertion in
+ // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
+ // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
+ // do not get into this branch.
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ // Make sure we don't leave latest_monitor_update_id incremented here:
+ self.context.latest_monitor_update_id -= 1;
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
+ // TODO: We may actually be able to switch to a fulfill here, though it's
+ // rare enough it may not be worth the complexity burden.
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ },
+ _ => {}
+ }
+ }
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
+ payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
+ });
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ if let InboundHTLCState::Committed = htlc.state {
+ } else {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
+ }
+
+ UpdateFulfillFetch::NewClaim {
+ monitor_update,
+ htlc_value_msat,
+ msg: Some(msgs::UpdateFulfillHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc_id_arg,
+ payment_preimage: payment_preimage_arg,
+ }),
+ }
+ }
+
+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
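+ // We may only release a new commitment-signed monitor update immediately if none of the already-queued updates are blocked.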
+ let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+ match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+ // Even if we aren't supposed to let new monitor updates with commitment state
+ // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+ // matter what. Sadly, to push a new monitor update which flies before others
+ // already queued, we have to insert it into the pending queue and update the
+ // update_ids of all the following monitors.
+ let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
+ // to be strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ self.context.pending_monitor_updates.len() - 1
+ } else {
+ let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
+ .unwrap_or(self.context.pending_monitor_updates.len());
+ let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
+ .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+ monitor_update.update_id = new_mon_id;
+ self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+ held_update.update.update_id += 1;
+ }
+ if msg.is_some() {
+ debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+ let update = self.build_commitment_no_status_check(logger);
+ self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update, blocked: true,
+ });
+ }
+ insert_pos
+ };
+ self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+ UpdateFulfillCommitFetch::NewClaim {
+ monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
+ .expect("We just pushed the monitor update").update,
+ htlc_value_msat,
+ }
+ },
+ UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
+ }
+ }
+
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
+ -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, err_packet, true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }
+
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
+ -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fail an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
+
+ let mut pending_idx = core::usize::MAX;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ }
+ return Ok(None);
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
+ }
+ }
+ pending_idx = idx;
+ }
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+ // is simply a duplicate fail, not previously failed and we failed-back too early.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
+
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ debug_assert!(force_holding_cell, "fail_htlc is only called with !force_holding_cell when emptying the holding cell, so we shouldn't end up back in it!");
+ force_holding_cell = true;
+ }
+
+ // Now update local state:
+ if force_holding_cell {
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ }
+ },
+ _ => {}
+ }