+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
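+ // If no monitor updates are currently blocked, a new commitment-bearing monitor update
+ // can be released immediately; otherwise it has to be queued behind the blocked ones.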
+ let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
+ match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+ // Even if we aren't supposed to let new monitor updates with commitment state
+ // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+ // matter what. Sadly, to push a new monitor update which flies before others
+ // already queued, we have to insert it into the pending queue and update the
+ // update_ids of all the following monitors.
+ if release_cs_monitor && msg.is_some() {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want
+ // update_ids to be strictly increasing by one, so reset it back to this update's id here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ } else {
+ let new_mon_id = self.context.blocked_monitor_updates.get(0)
+ .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+ monitor_update.update_id = new_mon_id;
+ for held_update in self.context.blocked_monitor_updates.iter_mut() {
+ held_update.update.update_id += 1;
+ }
+ if msg.is_some() {
+ debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+ let update = self.build_commitment_no_status_check(logger);
+ self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+ update,
+ });
+ }
+ }
+
+ self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+ UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
+ },
+ UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
+ }
+ }
+
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(())`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
+ -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, err_packet, true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }
+
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
+ -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fail an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
+
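+ // Scan the pending inbound HTLCs for the one we were asked to fail, rejecting the request
+ // if it is already being removed or was never fully committed.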
+ let mut pending_idx = core::usize::MAX;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ }
+ return Ok(None);
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
+ }
+ }
+ pending_idx = idx;
+ }
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+ // is simply a duplicate fail, not previously failed and we failed-back too early.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
+
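+ // If we cannot freely update the commitment state right now (we're awaiting their
+ // revoke_and_ack, the peer is disconnected, or a monitor update is in flight), the
+ // failure has to go into the holding cell rather than out in an immediate message.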
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
+ force_holding_cell = true;
+ }
+
+ // Now update local state:
+ if force_holding_cell {
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ }
+ },
+ _ => {}
+ }
+ }
+ log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
+ htlc_id: htlc_id_arg,
+ err_packet,
+ });
+ return Ok(None);
+ }
+
+ log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+ }
+
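+ // Hand the update_fail_htlc message back to the caller for immediate delivery; the
+ // corresponding commitment update is generated separately by the caller.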
+ Ok(Some(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc_id_arg,
+ reason: err_packet
+ }))
+ }
+
+ // Message handlers:
+
+ /// Handles a funding_signed message from the remote end.
+ /// If this call is successful, broadcast the funding transaction (and not before!)
+ pub fn funding_signed<L: Deref>(
+ &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
+ ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
+ where
+ L::Target: Logger
+ {
+ if !self.context.is_outbound() {
+ return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
+ }
+ if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
+ return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
+ }
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ }
+
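+ // Rebuild the funding script and both initial commitment transactions: the counterparty's
+ // (to seed the new ChannelMonitor below) and our own (to check their signature against).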
+ let funding_script = self.context.get_funding_redeemscript();
+
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+ let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+ {
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
+ }
+ }
+
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
+
+ self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
+ .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+
+
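+ // Their signature is valid, so build the ChannelMonitor for this channel; it is returned
+ // to the caller so the funding output can be watched before the funding transaction is
+ // broadcast (see the note on this method above).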
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let funding_txo = self.context.get_funding_txo().unwrap();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+ let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+ monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
+ &self.context.destination_script, (funding_txo, funding_txo_script),
+ &self.context.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.context.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx, best_block, self.context.counterparty_node_id);
+
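+ // Seed the monitor with the counterparty's initial commitment transaction so it can
+ // recognize and react to it if it is ever broadcast.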
+ channel_monitor.provide_initial_counterparty_commitment_tx(
+ counterparty_initial_bitcoin_tx.txid, Vec::new(),
+ self.context.cur_counterparty_commitment_transaction_number,
+ self.context.counterparty_cur_commitment_point.unwrap(),
+ counterparty_initial_commitment_tx.feerate_per_kw(),
+ counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
+ counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
+
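+ // Both sides have now signed the initial commitment transactions, so move to FundingSent
+ // and advance both commitment numbers past the initial commitment.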
+ assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
+ self.context.channel_state = ChannelState::FundingSent as u32;
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+ log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
+
+ let need_channel_ready = self.check_get_channel_ready(0).is_some();
+ self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ Ok(channel_monitor)
+ }
+
+ /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
+ /// and the channel is now usable (and public), this may generate an announcement_signatures to
+ /// reply with.
+ pub fn channel_ready<NS: Deref, L: Deref>(
+ &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
+ user_config: &UserConfig, best_block: &BestBlock, logger: &L
+ ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ self.context.workaround_lnd_bug_4006 = Some(msg.clone());
+ return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
+ }
+
+ if let Some(scid_alias) = msg.short_channel_id_alias {
+ if Some(scid_alias) != self.context.short_channel_id {
+ // The scid alias provided can be used to route payments *from* our counterparty,
+ // i.e. can be used for inbound payments and provided in invoices, but is not used
+ // when routing outbound payments.
+ self.context.latest_inbound_scid_alias = Some(scid_alias);
+ }
+ }
+
+ let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
+
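+ // If we have not yet sent our own channel_ready, just record that theirs arrived; if we
+ // have, the channel is now fully ready. Otherwise this is either a retransmission after
+ // reconnect (handled below) or a protocol violation.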
+ if non_shutdown_state == ChannelState::FundingSent as u32 {
+ self.context.channel_state |= ChannelState::TheirChannelReady as u32;
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+ self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+ self.context.update_time_counter += 1;
+ } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
+ // If we reconnected before sending our `channel_ready` they may still resend theirs:
+ (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
+ (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
+ {
+ // They probably disconnected/reconnected and re-sent the channel_ready, which is
+ // required, or they're sending a fresh SCID alias.
+ let expected_point =
+ if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
+ // If they haven't ever sent an updated point, the point they send should match
+ // the current one.
+ self.context.counterparty_cur_commitment_point
+ } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
+ // If we've advanced the commitment number once, the second commitment point is
+ // at `counterparty_prev_commitment_point`, which is not yet revoked.
+ debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
+ self.context.counterparty_prev_commitment_point
+ } else {
+ // If they have sent updated points, channel_ready is always supposed to match
+ // their "first" point, which we re-derive here.
+ Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
+ &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
+ ).expect("We already advanced, so previous secret keys should have been validated already")))
+ };
+ if expected_point != Some(msg.next_per_commitment_point) {
+ return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+ }
+ return Ok(None);
+ } else {
+ return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
+ }
+
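+ // Rotate their commitment points: the point we previously knew becomes the previous one
+ // and the point from this channel_ready becomes current.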
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+
+ log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
+
+ Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
+ }
+
+ pub fn update_add_htlc<F, FE: Deref, L: Deref>(
+ &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
+ create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
+ ) -> Result<(), ChannelError>
+ where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
+ FE::Target: FeeEstimator, L::Target: Logger,
+ {
+ // We can't accept HTLCs sent after we've sent a shutdown.
+ let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
+ if local_sent_shutdown {
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
+ }
+ // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
+ let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
+ if remote_sent_shutdown {
+ return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
+ }
+ if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
+ return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
+ }
+ if msg.amount_msat == 0 {
+ return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
+ }
+ if msg.amount_msat < self.context.holder_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
+ }
+
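+ // Enforce the limits we gave the counterparty on how many HTLCs, and how much total HTLC
+ // value, they may have in flight towards us.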
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
+ return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
+ }
+ if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
+ }
+ // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
+ // the reserve_satoshis we told them to always have as direct payment so that they lose
+ // something if we punish them for broadcasting an old state).
+ // Note that we don't really care about having a small/no to_remote output in our local
+ // commitment transactions, as the purpose of the channel reserve is to ensure we can
+ // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be