+ let announcement_sigs = if let Some((genesis_block_hash, node_signer, user_config)) = genesis_node_signer {
+ self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger)
+ } else { None };
+ Ok((None, timed_out_htlcs, announcement_sigs))
+ }
+
+ /// Indicates the funding transaction is no longer confirmed in the main chain. This may
+ /// force-close the channel, but may also indicate a harmless reorganization of a block or two
+ /// before the channel has reached channel_ready and we can just wait for more blocks.
+ pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
+ if self.context.funding_tx_confirmation_height != 0 {
+ // We handle the funding disconnection by calling best_block_updated with a height one
+ // below where our funding was connected, implying a reorg back to conf_height - 1.
+ let reorg_height = self.context.funding_tx_confirmation_height - 1;
+ // We use the time field to bump the current time we set on channel updates if it's
+ // larger. If we don't know that time has moved forward, we can just set it to the last
+ // time we saw and it will be ignored.
+ let best_time = self.context.update_time_counter;
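+ // Note that we pass `None` for the node signer/config tuple here as we never want to
+ // (and, per the asserts below, never can) generate announcement_sigs while unwinding a
+ // funding confirmation.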
+ match self.do_best_block_updated(reorg_height, best_time, None::<(BlockHash, &&NodeSigner, &UserConfig)>, logger) {
+ Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
+ assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
+ assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
+ assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
+ Ok(())
+ },
+ Err(e) => Err(e)
+ }
+ } else {
+ // We never learned about the funding confirmation anyway, just ignore
+ Ok(())
+ }
+ }
+
+ // Methods to get unprompted messages to send to the remote end (or where we already returned
+ // something in the handler for the message that prompted this message):
+
+ /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
+ /// announceable and available for use (have exchanged ChannelReady messages in both
+ /// directions). Should be used for both broadcasted announcements and in response to an
+ /// AnnouncementSignatures message from the remote peer.
+ ///
+ /// Will only fail if we're not in a state where channel_announcement may be sent (including
+ /// closing).
+ ///
+ /// This will only return ChannelError::Ignore upon failure.
+ fn get_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
+ ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ if !self.context.config.announced_channel {
+ return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
+ }
+ if !self.context.is_usable() {
+ return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
+ }
+
+ let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+ .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
+ let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
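+ // BOLT 7 orders the announcement by node_id: node_id_1 must be the lexicographically
+ // lesser of the two compressed keys, so work out which side we are first.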
+ let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
+
+ let msg = msgs::UnsignedChannelAnnouncement {
+ features: channelmanager::provided_channel_features(&user_config),
+ chain_hash,
+ short_channel_id: self.context.get_short_channel_id().unwrap(),
+ node_id_1: if were_node_one { node_id } else { counterparty_node_id },
+ node_id_2: if were_node_one { counterparty_node_id } else { node_id },
+ bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
+ bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
+ excess_data: Vec::new(),
+ };
+
+ Ok(msg)
+ }
+
+ fn get_announcement_sigs<NS: Deref, L: Deref>(
+ &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
+ best_block_height: u32, logger: &L
+ ) -> Option<msgs::AnnouncementSignatures>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
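+ // Per BOLT 7, don't announce the channel until the funding transaction has at least six
+ // confirmations (i.e. until best_block_height >= funding_tx_confirmation_height + 5).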
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return None;
+ }
+
+ if !self.context.is_usable() {
+ return None;
+ }
+
+ if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
+ log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
+ return None;
+ }
+
+ if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
+ return None;
+ }
+
+ log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
+ let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
+ Ok(a) => a,
+ Err(e) => {
+ log_trace!(logger, "{:?}", e);
+ return None;
+ }
+ };
+ let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
+ Err(_) => {
+ log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
+ return None;
+ },
+ Ok(v) => v
+ };
+ let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
+ Err(_) => {
+ log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
+ return None;
+ },
+ Ok(v) => v
+ };
+ self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
+
+ Some(msgs::AnnouncementSignatures {
+ channel_id: self.context.channel_id(),
+ short_channel_id: self.context.get_short_channel_id().unwrap(),
+ node_signature: our_node_sig,
+ bitcoin_signature: our_bitcoin_sig,
+ })
+ }
+
+ /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
+ /// available.
+ fn sign_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
+ ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
+ let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+ .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
+ let were_node_one = announcement.node_id_1 == our_node_key;
+
+ let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
+ .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
+ let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
+ Ok(msgs::ChannelAnnouncement {
+ node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
+ node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
+ bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
+ bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
+ contents: announcement,
+ })
+ } else {
+ Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
+ }
+ }
+
+ /// Processes an incoming announcement_signatures message, providing a fully-signed
+ /// channel_announcement message which we can broadcast and storing our counterparty's
+ /// signatures for later reconstruction/rebroadcast of the channel_announcement.
+ pub fn announcement_signatures<NS: Deref>(
+ &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
+ msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
+ ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
+
+ let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
+
+ if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
+ return Err(ChannelError::Close(format!(
+ "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
+ &announcement, self.context.get_counterparty_node_id())));
+ }
+ if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
+ return Err(ChannelError::Close(format!(
+ "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
+ &announcement, self.context.counterparty_funding_pubkey())));
+ }
+
+ self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return Err(ChannelError::Ignore(
+ "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
+ }
+
+ self.sign_channel_announcement(node_signer, announcement)
+ }
+
+ /// Gets a signed channel_announcement for this channel, if we previously received an
+ /// announcement_signatures from our counterparty.
+ pub fn get_signed_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
+ ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return None;
+ }
+ let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
+ Ok(res) => res,
+ Err(_) => return None,
+ };
+ match self.sign_channel_announcement(node_signer, announcement) {
+ Ok(res) => Some(res),
+ Err(_) => None,
+ }
+ }
+
+ /// May panic if called on a channel that wasn't immediately-previously
+ /// self.remove_uncommitted_htlcs_and_mark_paused()'d
+ pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
+ assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
+ assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
+ // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
+ // current to_remote balances. However, it no longer has any use, and thus is now simply
+ // set to a dummy (but valid, as required by the spec) public key.
+ // Fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
+ // branches; since we unwrap the parsed key below, we arbitrarily select a dummy pubkey
+ // which is both genuinely valid and valid under fuzzing mode's arbitrary validity criteria:
+ let mut pk = [2; 33]; pk[1] = 0xff;
+ let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
+ let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+ let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
+ log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
+ remote_last_secret
+ } else {
+ log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
+ [0;32]
+ };
+ self.mark_awaiting_response();
+ msgs::ChannelReestablish {
+ channel_id: self.context.channel_id(),
+ // The protocol has two different commitment number concepts - the "commitment
+ // transaction number", which starts from 0 and counts up, and the "revocation key
+ // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
+ // commitment transaction numbers by the index which will be used to reveal the
+ // revocation key for that commitment transaction, which means we have to convert them
+ // to protocol-level commitment numbers here...
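+ //
+ // A purely illustrative example of the conversion: if
+ // cur_holder_commitment_transaction_number has been decremented twice from
+ // INITIAL_COMMITMENT_NUMBER, the protocol-level next_local_commitment_number below is 2;
+ // similarly, with cur_counterparty_commitment_transaction_number three below
+ // INITIAL_COMMITMENT_NUMBER, next_remote_commitment_number works out to 2 after the extra
+ // decrement explained in the comment on that field.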
+
+ // next_local_commitment_number is the next commitment_signed number we expect to
+ // receive (indicating if they need to resend one that we missed).
+ next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
+ // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
+ // receive, however we track it by the next commitment number for a remote transaction
+ // (which is one further, as they always revoke the previous commitment transaction, not
+ // the one we send) so we have to decrement by 1. Note that if
+ // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
+ // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
+ // overflow here.
+ next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
+ your_last_per_commitment_secret: remote_last_secret,
+ my_current_per_commitment_point: dummy_pubkey,
+ // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
+ // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
+ // txid of that interactive transaction, else we MUST NOT set it.
+ next_funding_txid: None,
+ }
+ }
+
+
+ // Send stuff to our remote peers:
+
+ /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
+ /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+ /// commitment update.
+ ///
+ /// `Err`s will only be [`ChannelError::Ignore`].
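+ ///
+ /// A minimal usage sketch (illustrative only; `chan`, `payment_hash`, `cltv_expiry`,
+ /// `source`, `onion_packet` and `logger` are assumed to be valid, in-scope values):
+ /// ```ignore
+ /// // An `Err(ChannelError::Ignore(_))` here means the HTLC simply cannot be sent right
+ /// // now (e.g. insufficient outbound capacity) and should be failed backwards by the caller.
+ /// chan.queue_add_htlc(10_000, payment_hash, cltv_expiry, source, onion_packet, &logger)?;
+ /// // Later, [`Self::maybe_free_holding_cell_htlcs`] generates the actual commitment update.
+ /// ```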
+ pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+ onion_routing_packet: msgs::OnionPacket, logger: &L)
+ -> Result<(), ChannelError> where L::Target: Logger {
+ self
+ .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ .map_err(|err| {
+ if let ChannelError::Ignore(_) = err { /* fine */ }
+ else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
+ err
+ })
+ }
+
+ /// Adds a pending outbound HTLC to this channel. Note that you probably want
+ /// [`Self::send_htlc_and_commit`] instead, as you'll generally want both messages at once.
+ ///
+ /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
+ /// the wire:
+ /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
+ /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
+ /// awaiting ACK.
+ /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
+ /// we may not yet have sent the previous commitment update messages and will need to
+ /// regenerate them.
+ ///
+ /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
+ /// on this [`Channel`] if `force_holding_cell` is false.
+ ///
+ /// `Err`s will only be [`ChannelError::Ignore`].
+ fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+ onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
+ -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
+ }
+ let channel_total_msat = self.context.channel_value_satoshis * 1000;
+ if amount_msat > channel_total_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
+ }
+
+ if amount_msat == 0 {
+ return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
+ }
+
+ let available_balances = self.context.get_available_balances();
+ if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
+ available_balances.next_outbound_htlc_minimum_msat)));
+ }
+
+ if amount_msat > available_balances.next_outbound_htlc_limit_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
+ available_balances.next_outbound_htlc_limit_msat)));
+ }
+
+ if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
+ // Note that this should never really happen: an incoming HTLC for relay received while
+ // we're !is_live() will result in us rejecting the HTLC, and we won't allow
+ // the user to send directly into a !is_live() channel. However, if we
+ // disconnected while the previous hop was doing the commitment dance we may
+ // end up getting here after the forwarding delay. In any case, returning an
+ // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
+ return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
+ }
+
+ let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+ log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
+ if force_holding_cell { "into holding cell" }
+ else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
+ else { "to peer" });
+
+ if need_holding_cell {
+ force_holding_cell = true;
+ }
+
+ // Now update local state:
+ if force_holding_cell {
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat,
+ payment_hash,
+ cltv_expiry,
+ source,
+ onion_routing_packet,
+ });
+ return Ok(None);
+ }
+
+ self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
+ htlc_id: self.context.next_holder_htlc_id,
+ amount_msat,
+ payment_hash: payment_hash.clone(),
+ cltv_expiry,
+ state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
+ source,
+ });
+
+ let res = msgs::UpdateAddHTLC {
+ channel_id: self.context.channel_id,
+ htlc_id: self.context.next_holder_htlc_id,
+ amount_msat,
+ payment_hash,
+ cltv_expiry,
+ onion_routing_packet,
+ };
+ self.context.next_holder_htlc_id += 1;
+
+ Ok(Some(res))
+ }
+
+ fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
+ log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
+ // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
+ // fail to generate this commitment, we are still at least at a position where upgrading
+ // their status is acceptable.
+ for htlc in self.context.pending_inbound_htlcs.iter_mut() {
+ let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
+ Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
+ } else { None };
+ if let Some(state) = new_state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ htlc.state = state;
+ }
+ }
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+ }
+ }
+ if let Some((feerate, update_state)) = self.context.pending_update_fee {
+ if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
+ debug_assert!(!self.context.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ }
+ }
+ self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
+
+ let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
+ let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+ htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
+
+ if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
+ self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
+ }
+
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
+ commitment_txid: counterparty_commitment_txid,
+ htlc_outputs: htlcs.clone(),
+ commitment_number: self.context.cur_counterparty_commitment_transaction_number,
+ their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap()
+ }]
+ };
+ self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+ monitor_update
+ }
+
+ fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+ let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+
+ #[cfg(any(test, fuzzing))]
+ {
+ if !self.context.is_outbound() {
+ let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
+ *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ if let Some(info) = projected_commit_tx_info {
+ let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
+ if info.total_pending_htlcs == total_pending_htlcs
+ && info.next_holder_htlc_id == self.context.next_holder_htlc_id
+ && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
+ && info.feerate == self.context.feerate_per_kw {
+ let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors());
+ assert_eq!(actual_fee, info.fee);
+ }
+ }
+ }
+ }
+
+ (counterparty_commitment_txid, commitment_stats.htlcs_included)
+ }
+
+ /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
+ /// generation when we shouldn't change HTLC/channel state.
+ fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+ // Exercise the fee-consistency checks in `build_commitment_no_state_update` (test/fuzzing only)
+ #[cfg(any(test, fuzzing))]
+ self.build_commitment_no_state_update(logger);
+
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+ let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+ let (signature, htlc_signatures);
+
+ {
+ let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
+ for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
+ htlcs.push(htlc);
+ }
+
+ let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
+ signature = res.0;
+ htlc_signatures = res.1;
+
+ log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
+ encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
+ &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
+ log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+
+ for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
+ log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
+ encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
+ encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
+ log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
+ log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+ }
+ }
+
+ Ok((msgs::CommitmentSigned {
+ channel_id: self.context.channel_id,
+ signature,
+ htlc_signatures,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
+ }
+
+ /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
+ /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
+ ///
+ /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
+ /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
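+ ///
+ /// A sketch of the expected call pattern (illustrative only; persistence and message
+ /// delivery plumbing are assumed rather than shown):
+ /// ```ignore
+ /// match chan.send_htlc_and_commit(amt_msat, payment_hash, cltv_expiry, source, onion_packet, &logger)? {
+ ///     // A monitor update was built; persist it before releasing any generated messages.
+ ///     Some(_monitor_update) => { /* hand it to the chain::Watch / persister */ },
+ ///     // The HTLC went into the holding cell; there is nothing to send or persist yet.
+ ///     None => {},
+ /// }
+ /// ```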
+ pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+ let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+ if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
+ match send_res? {
+ Some(_) => {
+ let monitor_update = self.build_commitment_no_status_check(logger);
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ Ok(self.push_ret_blockable_mon_update(monitor_update))
+ },
+ None => Ok(None)
+ }
+ }
+
+ pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
+ if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
+ return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
+ }
+ self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
+ fee_base_msat: msg.contents.fee_base_msat,
+ fee_proportional_millionths: msg.contents.fee_proportional_millionths,
+ cltv_expiry_delta: msg.contents.cltv_expiry_delta
+ });
+
+ Ok(())
+ }
+
+ /// Begins the shutdown process, getting a message for the remote peer and returning all
+ /// holding cell HTLCs for payment failure.
+ ///
+ /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
+ /// [`ChannelMonitorUpdate`] will be returned.
+ pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
+ target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
+ -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+ where SP::Target: SignerProvider {
+ for htlc in self.context.pending_outbound_htlcs.iter() {
+ if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+ return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
+ }
+ }
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
+ if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
+ return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
+ }
+ else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
+ return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
+ }
+ }
+ if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+ return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
+ }
+
+ // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
+ // script is set; we just force-close and call it a day.
+ let mut chan_closed = false;
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ chan_closed = true;
+ }
+
+ let update_shutdown_script = match self.context.shutdown_scriptpubkey {
+ Some(_) => false,
+ None if !chan_closed => {
+ // use override shutdown script if provided
+ let shutdown_scriptpubkey = match override_shutdown_script {
+ Some(script) => script,
+ None => {
+ // otherwise, use the shutdown scriptpubkey provided by the signer
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
+ },
+ };
+ if !shutdown_scriptpubkey.is_compatible(their_features) {
+ return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+ }
+ self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+ true
+ },
+ None => false,
+ };
+
+ // From here on out, we may not fail!
+ self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ } else {
+ self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+ }
+ self.context.update_time_counter += 1;
+
+ let monitor_update = if update_shutdown_script {
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ }],
+ };
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ if self.push_blockable_mon_update(monitor_update) {
+ self.context.pending_monitor_updates.last().map(|upd| &upd.update)
+ } else { None }
+ } else { None };
+ let shutdown = msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ };
+
+ // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
+ // our shutdown until we've committed all of the pending changes.
+ self.context.holding_cell_update_fee = None;
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
+ self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
+ dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
+ false
+ },
+ _ => true
+ }
+ });
+
+ debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
+ "we can't both complete shutdown and return a monitor update");
+
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
+ }
+
+ pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
+ self.context.holding_cell_htlc_updates.iter()
+ .flat_map(|htlc_update| {
+ match htlc_update {
+ HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
+ => Some((source, payment_hash)),
+ _ => None,
+ }
+ })
+ .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
+ }
+}
+
+/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
+pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
+ pub context: ChannelContext<Signer>,
+}
+
+impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
+ pub fn new<ES: Deref, SP: Deref, F: Deref>(
+ fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
+ channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
+ outbound_scid_alias: u64
+ ) -> Result<OutboundV1Channel<Signer>, APIError>
+ where ES::Target: EntropySource,
+ SP::Target: SignerProvider<Signer = Signer>,
+ F::Target: FeeEstimator,
+ {
+ let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
+ let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
+
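+ // BOLT 2: unless the counterparty has negotiated wumbo (`option_support_large_channel`),
+ // the funding amount must stay below 2^24 satoshis.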
+ if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+ }
+ if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
+ }
+ let channel_value_msat = channel_value_satoshis * 1000;
+ if push_msat > channel_value_msat {
+ return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
+ }
+ if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
+ return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
+ }
+ let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
+ if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ // Protocol-level safety check in place, although it should never happen because
+ // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
+ return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
+ }
+
+ let channel_type = Self::get_initial_channel_type(&config, their_features);
+ debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
+
+ let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+
+ let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
+ let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
+ if value_to_self_msat < commitment_tx_fee {
+ return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
+ }
+
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }