+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ return MonitorRestoreUpdates {
+ raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
+ accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ };
+ }
+
+ let raa = if self.context.monitor_pending_revoke_and_ack {
+ Some(self.get_last_revoke_and_ack())
+ } else { None };
+ let commitment_update = if self.context.monitor_pending_commitment_signed {
+ self.mark_awaiting_response();
+ Some(self.get_last_commitment_update(logger))
+ } else { None };
+
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ let order = self.context.resend_order.clone();
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+ if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
+ match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+ MonitorRestoreUpdates {
+ raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ }
+ }
+
+ pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.context.is_outbound() {
+ return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
+ }
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
+ let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
+
+ self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
+ self.context.update_time_counter += 1;
+ // If the feerate has increased over the previous dust buffer (note that
+ // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
+ // won't be pushed over our dust exposure limit by the feerate increase.
+ if feerate_over_dust_buffer {
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+ msg.feerate_per_kw, holder_tx_dust_exposure)));
+ }
+ if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+ msg.feerate_per_kw, counterparty_tx_dust_exposure)));
+ }
+ }
+ Ok(())
+ }
+
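+ /// Rebuilds the last `revoke_and_ack` we sent: it releases the per-commitment secret for the
+ /// commitment we previously revoked and hands out our next per-commitment point.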
+ fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
+ msgs::RevokeAndACK {
+ channel_id: self.context.channel_id,
+ per_commitment_secret,
+ next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ }
+ }
+
+ fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
+ let mut update_add_htlcs = Vec::new();
+ let mut update_fulfill_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
+
+ for htlc in self.context.pending_outbound_htlcs.iter() {
+ if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
+ update_add_htlcs.push(msgs::UpdateAddHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ amount_msat: htlc.amount_msat,
+ payment_hash: htlc.payment_hash,
+ cltv_expiry: htlc.cltv_expiry,
+ onion_routing_packet: (**onion_packet).clone(),
+ skimmed_fee_msat: htlc.skimmed_fee_msat,
+ });
+ }
+ }
+
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ match reason {
+ &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
+ update_fail_htlcs.push(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ reason: err_packet.clone()
+ });
+ },
+ &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
+ update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ sha256_of_onion: sha256_of_onion.clone(),
+ failure_code: failure_code.clone(),
+ });
+ },
+ &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
+ update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ payment_preimage: payment_preimage.clone(),
+ });
+ },
+ }
+ }
+ }
+
+ let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
+ Some(msgs::UpdateFee {
+ channel_id: self.context.channel_id(),
+ feerate_per_kw: self.context.pending_update_fee.unwrap().0,
+ })
+ } else { None };
+
+ log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+ log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
+ msgs::CommitmentUpdate {
+ update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
+ commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
+ }
+ }
+
+ /// May panic if some calls other than message-handling calls (which will all Err immediately)
+ /// have been called between `remove_uncommitted_htlcs_and_mark_paused` and this call.
+ ///
+ /// The following links, which also appear in log lines below, are included here so they get
+ /// checked during doc builds (when run with `cargo doc --document-private-items`):
+ /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
+ /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
+ pub fn channel_reestablish<L: Deref, NS: Deref>(
+ &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
+ genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
+ ) -> Result<ReestablishResponses, ChannelError>
+ where
+ L::Target: Logger,
+ NS::Target: NodeSigner
+ {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
+ // almost certainly indicates we are going to end up out-of-sync in some way, so we
+ // just close here instead of trying to recover.
+ return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
+ }
+
+ if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
+ msg.next_local_commitment_number == 0 {
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
+ }
+
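+ // Verify the `your_last_per_commitment_secret` the peer echoed back against the
+ // per-commitment point we actually generated for that commitment number; a mismatch means
+ // their view of our commitment state is bogus.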
+ if msg.next_remote_commitment_number > 0 {
+ let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
+ let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+ if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+ }
+ if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+ macro_rules! log_and_panic {
+ ($err_msg: expr) => {
+ log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
+ panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
+ }
+ }
+ log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+ This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+ More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+ If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+ ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+ ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+ Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+ See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
+ }
+ }
+
+ // Before we change the state of the channel, we check if the peer is sending a very old
+ // commitment transaction number; if so, we send a warning message.
+ let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
+ if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+ return Err(
+ ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
+ );
+ }
+
+ // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
+ // remaining cases either succeed or ErrorMessage-fail).
+ self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
+ self.context.sent_message_awaiting_response = None;
+
+ let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ Some(msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ })
+ } else { None };
+
+ let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
+
+ if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
+ // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
+ if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
+ self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if msg.next_remote_commitment_number != 0 {
+ return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
+ }
+ // Short circuit the whole handler as there is nothing we can resend them
+ return Ok(ReestablishResponses {
+ channel_ready: None,
+ raa: None, commitment_update: None,
+ order: RAACommitmentOrder::CommitmentFirst,
+ shutdown_msg, announcement_sigs,
+ });
+ }
+
+ // We have OurChannelReady set!
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ return Ok(ReestablishResponses {
+ channel_ready: Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ }),
+ raa: None, commitment_update: None,
+ order: RAACommitmentOrder::CommitmentFirst,
+ shutdown_msg, announcement_sigs,
+ });
+ }
+
+ let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+ // Remote isn't waiting on any RevokeAndACK from us!
+ // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
+ None
+ } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
+ if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ self.context.monitor_pending_revoke_and_ack = true;
+ None
+ } else {
+ Some(self.get_last_revoke_and_ack())
+ }
+ } else {
+ return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
+ };
+
+ // We increment cur_counterparty_commitment_transaction_number only upon receipt of
+ // revoke_and_ack, not on sending commitment_signed, so we add one if we have
+ // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
+ // the corresponding revoke_and_ack back yet.
+ let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
+ if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
+ self.mark_awaiting_response();
+ }
+ let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
+
+ let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
+ // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ })
+ } else { None };
+
+ if msg.next_local_commitment_number == next_counterparty_commitment_number {
+ if required_revoke.is_some() {
+ log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
+ } else {
+ log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
+ }
+
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ raa: required_revoke,
+ commitment_update: None,
+ order: self.context.resend_order.clone(),
+ })
+ } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
+ if required_revoke.is_some() {
+ log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
+ } else {
+ log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
+ }
+
+ if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ self.context.monitor_pending_commitment_signed = true;
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ commitment_update: None, raa: None,
+ order: self.context.resend_order.clone(),
+ })
+ } else {
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ raa: required_revoke,
+ commitment_update: Some(self.get_last_commitment_update(logger)),
+ order: self.context.resend_order.clone(),
+ })
+ }
+ } else {
+ Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
+ }
+ }
+
+ /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
+ /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
+ /// at which point they will be recalculated.
+ fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+ -> (u64, u64)
+ where F::Target: FeeEstimator
+ {
+ if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
+
+ // Propose a range from our current Background feerate to our Normal feerate plus our
+ // force_close_avoidance_max_fee_satoshis.
+ // If we fail to come to consensus, we'll have to force-close.
+ let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
+ let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
+
+ // The spec requires that (when the channel does not have anchors) we only send absolute
+ // channel fees no greater than the absolute channel fee on the current commitment
+ // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
+ // a very good reason to apply such a limit in any case. We don't bother doing so, risking
+ // some force-closure by old nodes, but we wanted to close the channel anyway.
+
+ if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
+ let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
+ proposed_feerate = cmp::max(proposed_feerate, min_feerate);
+ proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
+ }
+
+ // Note that technically we could end up with a lower minimum fee if one side's balance is
+ // below our dust limit, causing the output to disappear. We don't bother handling this
+ // case, however, as this should only happen if a channel is closed before any (material)
+ // payments have been made on it. This may cause slight fee overpayment and/or failure to
+ // come to consensus with our counterparty on appropriate fees, however it should be a
+ // relatively rare case. We can revisit this later, though note that in order to determine
+ // if the funder's output is dust we have to know the absolute fee we're going to use.
+ let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
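+ // Feerates are in sat per 1000 weight units, so the absolute fee is feerate * weight / 1000.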
+ let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
+ let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
+ // We always add force_close_avoidance_max_fee_satoshis to our normal
+ // feerate-calculated fee, but allow the max to be overridden if we're using a
+ // target feerate-calculated fee.
+ cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
+ proposed_max_feerate as u64 * tx_weight / 1000)
+ } else {
+ self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
+ };
+
+ self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
+ self.context.closing_fee_limits.clone().unwrap()
+ }
+
+ /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
+ /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
+ /// this point, if we're the funder, we should send the initial closing_signed, and in any case
+ /// shutdown should complete within a reasonable timeframe.
+ fn closing_negotiation_ready(&self) -> bool {
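+ // Negotiation is ready only once both shutdown flags are set with none of the blocking
+ // flags (awaiting a remote RAA, peer disconnected, or monitor update in progress), no
+ // HTLCs remain pending, and no fee update is in flight.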
+ self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
+ self.context.channel_state &
+ (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
+ ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
+ == BOTH_SIDES_SHUTDOWN_MASK &&
+ self.context.pending_update_fee.is_none()
+ }
+
+ /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
+ /// an Err if no progress is being made and the channel should be force-closed instead.
+ /// Should be called on a one-minute timer.
+ pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
+ if self.closing_negotiation_ready() {
+ if self.context.closing_signed_in_flight {
+ return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
+ } else {
+ self.context.closing_signed_in_flight = true;
+ }
+ }
+ Ok(())
+ }
+
+ pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
+ return Ok((None, None));
+ }
+
+ if !self.context.is_outbound() {
+ if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
+ return self.closing_signed(fee_estimator, &msg);
+ }
+ return Ok((None, None));
+ }
+
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
+ log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
+ our_min_fee, our_max_fee, total_fee_satoshis);
+
+ let sig = self.context.holder_signer
+ .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
+ .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
+
+ self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
+ Ok((Some(msgs::ClosingSigned {
+ channel_id: self.context.channel_id,
+ fee_satoshis: total_fee_satoshis,
+ signature: sig,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), None))
+ }
+
+ // Marks a channel as waiting for a response from the counterparty. If a response is not
+ // received within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] timer ticks after we send our
+ // own message, we'll attempt a reconnection.
+ fn mark_awaiting_response(&mut self) {
+ self.context.sent_message_awaiting_response = Some(0);
+ }
+
+ /// Determines whether we should disconnect the counterparty due to not receiving a response
+ /// within our expected timeframe.
+ ///
+ /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+ pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
+ let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
+ ticks_elapsed
+ } else {
+ // Don't disconnect when we're not waiting on a response.
+ return false;
+ };
+ *ticks_elapsed += 1;
+ *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
+ }
+
+ pub fn shutdown<SP: Deref>(
+ &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
+ ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ where SP::Target: SignerProvider
+ {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
+ }
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ // Spec says we should fail the connection, not the channel, but that's nonsense; there
+ // are plenty of reasons you may want to fail a channel pre-funding, and the spec says you
+ // can do that via error message without getting a connection failure anyway...
+ return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
+ }
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
+ return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
+ }
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+
+ if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
+ return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
+ }
+
+ if self.context.counterparty_shutdown_scriptpubkey.is_some() {
+ if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
+ return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
+ }
+ } else {
+ self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
+ }
+
+ // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
+ // immediately after the commitment dance, but we can send a Shutdown because we won't send
+ // any further commitment updates after we set LocalShutdownSent.
+ let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
+
+ let update_shutdown_script = match self.context.shutdown_scriptpubkey {
+ Some(_) => false,
+ None => {
+ assert!(send_shutdown);
+ let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+ };
+ if !shutdown_scriptpubkey.is_compatible(their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ }
+ self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+ true
+ },
+ };
+
+ // From here on out, we may not fail!
+
+ self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
+ self.context.update_time_counter += 1;
+
+ let monitor_update = if update_shutdown_script {
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ }],
+ };
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ if self.push_blockable_mon_update(monitor_update) {
+ self.context.pending_monitor_updates.last().map(|upd| &upd.update)
+ } else { None }
+ } else { None };
+ let shutdown = if send_shutdown {
+ Some(msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ })
+ } else { None };
+
+ // We can't send our shutdown until we've committed all of our pending HTLCs, but the
+ // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
+ // cell HTLCs and return them to fail the payment.
+ self.context.holding_cell_update_fee = None;
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
+ self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
+ dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
+ false
+ },
+ _ => true
+ }
+ });
+
+ self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+ self.context.update_time_counter += 1;
+
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
+ }
+
+ fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
+ let mut tx = closing_tx.trust().built_transaction().clone();
+
+ tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
+
+ let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
+ let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
+ let mut holder_sig = sig.serialize_der().to_vec();
+ holder_sig.push(EcdsaSighashType::All as u8);
+ let mut cp_sig = counterparty_sig.serialize_der().to_vec();
+ cp_sig.push(EcdsaSighashType::All as u8);
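+ // Per BOLT 3, the witness places the signature for the lexicographically lesser funding
+ // pubkey first.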
+ if funding_key[..] < counterparty_funding_key[..] {
+ tx.input[0].witness.push(holder_sig);
+ tx.input[0].witness.push(cp_sig);
+ } else {
+ tx.input[0].witness.push(cp_sig);
+ tx.input[0].witness.push(holder_sig);
+ }
+
+ tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
+ tx
+ }
+
+ pub fn closing_signed<F: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
+ where F::Target: FeeEstimator
+ {
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
+ return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
+ }
+ if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
+ return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
+ }
+ if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
+ return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
+ }
+
+ if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
+ return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
+ }
+
+ if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
+ self.context.pending_counterparty_closing_signed = Some(msg.clone());
+ return Ok((None, None));
+ }
+
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
+ if used_total_fee != msg.fee_satoshis {
+ return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
+ }
+ let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
+
+ match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ Ok(_) => {},
+ Err(_e) => {
+ // The remote end may have decided to revoke their output due to inconsistent dust
+ // limits, so check for that case by re-checking the signature here.
+ closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
+ let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
+ secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
+ },
+ };
+
+ for outp in closing_tx.trust().built_transaction().output.iter() {
+ if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
+ }
+ }
+
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
+ if last_fee == msg.fee_satoshis {
+ let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.update_time_counter += 1;
+ return Ok((None, Some(tx)));
+ }
+ }
+
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+
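+ // Signs and returns a closing_signed at $new_fee; if it matches the fee the peer just
+ // proposed, we also finalize the channel and hand back the fully-signed closing transaction
+ // for broadcast.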
+ macro_rules! propose_fee {
+ ($new_fee: expr) => {
+ let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
+ (closing_tx, $new_fee)
+ } else {
+ self.build_closing_transaction($new_fee, false)
+ };
+
+ let sig = self.context.holder_signer
+ .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
+
+ let signed_tx = if $new_fee == msg.fee_satoshis {
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.update_time_counter += 1;
+ let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
+ Some(tx)
+ } else { None };
+
+ self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
+ return Ok((Some(msgs::ClosingSigned {
+ channel_id: self.context.channel_id,
+ fee_satoshis: used_fee,
+ signature: sig,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), signed_tx))
+ }
+ }
+
+ if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
+ if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
+ return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
+ }
+ if max_fee_satoshis < our_min_fee {
+ return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
+ }
+ if min_fee_satoshis > our_max_fee {
+ return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
+ }
+
+ if !self.context.is_outbound() {
+ // They have to pay, so pick the highest fee in the overlapping range.
+ // We should never set an upper bound aside from their full balance
+ debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
+ propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
+ } else {
+ if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
+ return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
+ msg.fee_satoshis, our_min_fee, our_max_fee)));
+ }
+ // The proposed fee is in our acceptable range, accept it and broadcast!
+ propose_fee!(msg.fee_satoshis);
+ }
+ } else {
+ // Old fee style negotiation. We don't bother to enforce whether they are complying
+ // with the "making progress" requirements, we just comply and hope for the best.
+ if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
+ if msg.fee_satoshis > last_fee {
+ if msg.fee_satoshis < our_max_fee {
+ propose_fee!(msg.fee_satoshis);
+ } else if last_fee < our_max_fee {
+ propose_fee!(our_max_fee);
+ } else {
+ return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
+ }
+ } else {
+ if msg.fee_satoshis > our_min_fee {
+ propose_fee!(msg.fee_satoshis);
+ } else if last_fee > our_min_fee {
+ propose_fee!(our_min_fee);
+ } else {
+ return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
+ }
+ }
+ } else {
+ if msg.fee_satoshis < our_min_fee {
+ propose_fee!(our_min_fee);
+ } else if msg.fee_satoshis > our_max_fee {
+ propose_fee!(our_max_fee);
+ } else {
+ propose_fee!(msg.fee_satoshis);
+ }
+ }
+ }
+ }
+
+ fn internal_htlc_satisfies_config(
+ &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
+ ) -> Result<(), (&'static str, u16)> {
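+ // Forwarding fee is forwarding_fee_base_msat plus forwarding_fee_proportional_millionths
+ // parts-per-million of the outbound amount, computed with checked math so a bogus config
+ // can't overflow.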
+ let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
+ .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
+ if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
+ (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
+ return Err((
+ "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
+ 0x1000 | 12, // fee_insufficient
+ ));
+ }
+ if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
+ return Err((
+ "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+ 0x1000 | 13, // incorrect_cltv_expiry
+ ));
+ }
+ Ok(())
+ }
+
+ /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
+ /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
+ /// unsuccessful, falls back to the previous one if one exists.
+ pub fn htlc_satisfies_config(
+ &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
+ ) -> Result<(), (&'static str, u16)> {
+ self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
+ .or_else(|err| {
+ if let Some(prev_config) = self.context.prev_config() {
+ self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
+ } else {
+ Err(err)
+ }
+ })
+ }
+
+ pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_holder_commitment_transaction_number + 1
+ }
+
+ pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
+ }
+
+ pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_counterparty_commitment_transaction_number + 2
+ }
+
+ #[cfg(test)]
+ pub fn get_signer(&self) -> &Signer {
+ &self.context.holder_signer
+ }
+
+ #[cfg(test)]
+ pub fn get_value_stat(&self) -> ChannelValueStat {
+ ChannelValueStat {
+ value_to_self_msat: self.context.value_to_self_msat,
+ channel_value_msat: self.context.channel_value_satoshis * 1000,
+ channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
+ pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
+ pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
+ holding_cell_outbound_amount_msat: {
+ let mut res = 0;
+ for h in self.context.holding_cell_htlc_updates.iter() {
+ match h {
+ &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
+ res += amount_msat;
+ }
+ _ => {}
+ }
+ }
+ res
+ },
+ counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
+ counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
+ }
+ }
+
+ /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
+ /// Allowed in any state (including after shutdown)
+ pub fn is_awaiting_monitor_update(&self) -> bool {
+ (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
+ }
+
+ pub fn get_latest_complete_monitor_update_id(&self) -> u64 {
+ if self.context.pending_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
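+ // Pending updates are kept in `update_id` order, so everything strictly below the first
+ // entry's id has already completed.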
+ self.context.pending_monitor_updates[0].update.update_id - 1
+ }
+
+ /// Returns the next blocked monitor update, if one exists, and a bool indicating whether a
+ /// further blocked monitor update exists after it.
+ pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(&ChannelMonitorUpdate, bool)> {
+ for i in 0..self.context.pending_monitor_updates.len() {
+ if self.context.pending_monitor_updates[i].blocked {
+ self.context.pending_monitor_updates[i].blocked = false;
+ return Some((&self.context.pending_monitor_updates[i].update,
+ self.context.pending_monitor_updates.len() > i + 1));
+ }
+ }
+ None
+ }
+
+ /// Pushes a new monitor update into our monitor update queue, returning whether it should be
+ /// immediately given to the user for persisting or if it should be held as blocked.
+ fn push_blockable_mon_update(&mut self, update: ChannelMonitorUpdate) -> bool {
+ let release_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+ self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update, blocked: !release_monitor
+ });
+ release_monitor
+ }
+
+ /// Pushes a new monitor update into our monitor update queue, returning a reference to it if
+ /// it should be immediately given to the user for persisting or `None` if it should be held as
+ /// blocked.
+ fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
+ -> Option<&ChannelMonitorUpdate> {
+ let release_monitor = self.push_blockable_mon_update(update);
+ if release_monitor { self.context.pending_monitor_updates.last().map(|upd| &upd.update) } else { None }
+ }
+
+ pub fn no_monitor_updates_pending(&self) -> bool {
+ self.context.pending_monitor_updates.is_empty()
+ }
+
+ pub fn complete_all_mon_updates_through(&mut self, update_id: u64) {
+ self.context.pending_monitor_updates.retain(|upd| {
+ if upd.update.update_id <= update_id {
+ assert!(!upd.blocked, "Completed update must have flown");
+ false
+ } else { true }
+ });
+ }
+
+ pub fn complete_one_mon_update(&mut self, update_id: u64) {
+ self.context.pending_monitor_updates.retain(|upd| upd.update.update_id != update_id);
+ }
+
+ /// Returns an iterator over all unblocked monitor updates which have not yet completed.
+ pub fn uncompleted_unblocked_mon_updates(&self) -> impl Iterator<Item=&ChannelMonitorUpdate> {
+ self.context.pending_monitor_updates.iter()
+ .filter_map(|upd| if upd.blocked { None } else { Some(&upd.update) })
+ }
+
+ /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
+ /// If the channel is outbound, this implies we have not yet broadcasted the funding
+ /// transaction. If the channel is inbound, this implies simply that the channel has not
+ /// advanced state.
+ pub fn is_awaiting_initial_mon_persist(&self) -> bool {
+ if !self.is_awaiting_monitor_update() { return false; }
+ if self.context.channel_state &
+ !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
+ == ChannelState::FundingSent as u32 {
+ // If we're not a 0conf channel, we'll be waiting on a monitor update with only
+ // FundingSent set, though our peer could have sent their channel_ready.
+ debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
+ return true;
+ }
+ if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
+ self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
+ // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
+ // waiting for the initial monitor persistence. Thus, we check if our commitment
+ // transaction numbers have both been iterated only exactly once (for the
+ // funding_signed), and we're awaiting monitor update.
+ //
+ // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
+ // only way to get an awaiting-monitor-update state during initial funding is if the
+ // initial monitor persistence is still pending).
+ //
+ // Because deciding we're awaiting initial broadcast spuriously could result in
+ // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
+ // we hard-assert here, even in production builds.
+ if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
+ assert!(self.context.monitor_pending_channel_ready);
+ assert_eq!(self.context.latest_monitor_update_id, 0);
+ return true;
+ }
+ false
+ }
+
+ /// Returns true if our channel_ready has been sent
+ pub fn is_our_channel_ready(&self) -> bool {
+ (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state >= ChannelState::ChannelReady as u32
+ }
+
+ /// Returns true if our peer has either initiated or agreed to shut down the channel.
+ pub fn received_shutdown(&self) -> bool {
+ (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
+ }
+
+ /// Returns true if we either initiated or agreed to shut down the channel.
+ pub fn sent_shutdown(&self) -> bool {
+ (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
+ }
+
+ /// Returns true if this channel is fully shut down. True here implies that no further actions
+ /// may/will be taken on this channel, and thus this object should be freed. Any future changes
+ /// will be handled appropriately by the chain monitor.
+ pub fn is_shutdown(&self) -> bool {
+ if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
+ assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
+ true
+ } else { false }
+ }
+
+ pub fn channel_update_status(&self) -> ChannelUpdateStatus {
+ self.context.channel_update_status
+ }
+
+ pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
+ self.context.update_time_counter += 1;
+ self.context.channel_update_status = status;
+ }
+
+ fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+ // Called:
+ // * always when a new block/transactions are confirmed with the new height
+ // * when funding is signed with a height of 0
+ if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
+ return None;
+ }
+
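+ // The block containing the funding transaction counts as its first confirmation, hence the
+ // +1; a non-positive value means a reorg has un-confirmed the funding transaction.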
+ let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
+ if funding_tx_confirmations <= 0 {
+ self.context.funding_tx_confirmation_height = 0;
+ }
+
+ if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
+ return None;
+ }
+
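+ // Strip the flags which can be set alongside the funding states so we can compare the base
+ // state directly below.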
+ let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
+ let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
+ self.context.channel_state |= ChannelState::OurChannelReady as u32;
+ true
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
+ self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+ self.context.update_time_counter += 1;
+ true
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+ // We got a reorg but not enough to trigger a force close, just ignore.
+ false
+ } else {
+ if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state < ChannelState::ChannelReady as u32 {
+ // We should never see a funding transaction on-chain until we've received
+ // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
+ // an inbound channel - before that we have no known funding TXID). The fuzzer,
+ // however, may do this and we shouldn't treat it as a bug.
+ #[cfg(not(fuzzing))]
+ panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
+ Do NOT broadcast a funding transaction manually - let LDK do it for you!",
+ self.context.channel_state);
+ }
+ // We got a reorg but not enough to trigger a force close, just ignore.
+ false
+ };
+
+ if need_commitment_update {
+ if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ let next_per_commitment_point =
+ self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
+ return Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id,
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ });
+ }
+ } else {
+ self.context.monitor_pending_channel_ready = true;
+ }
+ }
+ None
+ }
+
+ /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
+ /// In the first case, we store the confirmation height and calculate the short channel id.
+ /// In the second, we simply return an Err indicating we need to be force-closed now.
+ pub fn transactions_confirmed<NS: Deref, L: Deref>(
+ &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
+ genesis_block_hash: BlockHash, node_signer: &NS, user_config: &UserConfig, logger: &L
+ ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ if let Some(funding_txo) = self.context.get_funding_txo() {
+ for &(index_in_block, tx) in txdata.iter() {
+ // Check if the transaction is the expected funding transaction, and if it is,
+ // check that it pays the right amount to the right script.
+ if self.context.funding_tx_confirmation_height == 0 {
+ if tx.txid() == funding_txo.txid {
+ let txo_idx = funding_txo.index as usize;
+ if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
+ tx.output[txo_idx].value != self.context.channel_value_satoshis {
+ if self.context.is_outbound() {
+ // If we generated the funding transaction and it doesn't match what it
+ // should, the client is really broken and we should just panic and
+ // tell them off. That said, because hash collisions happen with high
+ // probability in fuzzing mode, if we're fuzzing we just close the
+ // channel and move on.
+ #[cfg(not(fuzzing))]
+ panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+ }
+ self.context.update_time_counter += 1;
+ let err_reason = "funding tx had wrong script/value or output index";
+ return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
+ } else {
+ if self.context.is_outbound() {
+ for input in tx.input.iter() {
+ if input.witness.is_empty() {
+ // We generated a malleable funding transaction, implying we've
+ // just exposed ourselves to funds loss to our counterparty.
+ #[cfg(not(fuzzing))]
+ panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+ }
+ }
+ }
+ self.context.funding_tx_confirmation_height = height;
+ self.context.funding_tx_confirmed_in = Some(*block_hash);
+ self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
+ Ok(scid) => Some(scid),
+ Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
+ }
+ }
+ }
+ // If we allow 1-conf funding, we may need to check for channel_ready here and
+ // send it immediately instead of waiting for a best_block_updated call (which
+ // may have already happened for this block).
+ if let Some(channel_ready) = self.check_get_channel_ready(height) {
+ log_info!(logger, "Sending a channel_ready to our peer for channel {}", log_bytes!(self.context.channel_id));
+ let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, height, logger);
+ return Ok((Some(channel_ready), announcement_sigs));
+ }
+ }
+ for inp in tx.input.iter() {
+ if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
+ log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.context.channel_id()));
+ return Err(ClosureReason::CommitmentTxConfirmed);
+ }