+ /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
+ /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
+ /// this point, if we're the funder, we should send the initial closing_signed, and in any
+ /// case, shutdown should complete within a reasonable timeframe.
+ fn closing_negotiation_ready(&self) -> bool {
+ self.context.closing_negotiation_ready()
+ }
+
+ /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
+ /// an Err if no progress is being made and the channel should be force-closed instead.
+ /// Should be called on a one-minute timer.
+ pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
+ if self.closing_negotiation_ready() {
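+ // If `closing_signed_in_flight` is already set, a full timer period has passed since
+ // negotiation became ready without it completing, so we give up and force-close;
+ // otherwise we mark negotiation as in flight and re-check on the next tick.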
+ if self.context.closing_signed_in_flight {
+ return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
+ } else {
+ self.context.closing_signed_in_flight = true;
+ }
+ }
+ Ok(())
+ }
+
+ pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
+ return Ok((None, None, None));
+ }
+
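+ // Only the funder proposes the initial closing_signed. If we're the fundee, the most we do
+ // here is respond to a counterparty closing_signed that was deferred earlier (e.g. while a
+ // monitor update was in progress).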
+ if !self.context.is_outbound() {
+ if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
+ return self.closing_signed(fee_estimator, &msg);
+ }
+ return Ok((None, None, None));
+ }
+
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+
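+ // Our initial proposal is built at our minimum acceptable fee; the fee_range we attach
+ // tells the counterparty they may pick anything up to our maximum.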
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
+ log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
+ our_min_fee, our_max_fee, total_fee_satoshis);
+
+ match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ let sig = ecdsa
+ .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
+ .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
+
+ self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
+ Ok((Some(msgs::ClosingSigned {
+ channel_id: self.context.channel_id,
+ fee_satoshis: total_fee_satoshis,
+ signature: sig,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), None, None))
+ }
+ }
+ }
+
+ // Marks a channel as waiting for a response from the counterparty. If no response is received
+ // within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] timer ticks of sending our own message,
+ // we'll attempt a reconnection.
+ fn mark_awaiting_response(&mut self) {
+ self.context.sent_message_awaiting_response = Some(0);
+ }
+
+ /// Determines whether we should disconnect the counterparty due to not receiving a response
+ /// within our expected timeframe.
+ ///
+ /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+ pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
+ let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
+ ticks_elapsed
+ } else {
+ // Don't disconnect when we're not waiting on a response.
+ return false;
+ };
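+ // Each call corresponds to one timer tick; once enough ticks elapse without the expected
+ // response we tell the caller to disconnect the peer and try reconnecting.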
+ *ticks_elapsed += 1;
+ *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
+ }
+
+ pub fn shutdown(
+ &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
+ ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
+ }
+ if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
+ // Spec says we should fail the connection, not the channel, but that's nonsense, there
+ // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
+ // can do that via error message without getting a connection fail anyway...
+ return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
+ }
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
+ return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
+ }
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+
+ if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
+ return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
+ }
+
+ if self.context.counterparty_shutdown_scriptpubkey.is_some() {
+ if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
+ return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
+ }
+ } else {
+ self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
+ }
+
+ // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
+ // immediately after the commitment dance, but we can send a Shutdown because we won't send
+ // any further commitment updates after we set LocalShutdownSent.
+ let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
+
+ let update_shutdown_script = match self.context.shutdown_scriptpubkey {
+ Some(_) => false,
+ None => {
+ assert!(send_shutdown);
+ let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+ };
+ if !shutdown_scriptpubkey.is_compatible(their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ }
+ self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+ true
+ },
+ };
+
+ // From here on out, we may not fail!
+
+ self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
+ self.context.update_time_counter += 1;
+
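+ // If we just generated a fresh shutdown script above, record it in a ChannelMonitorUpdate
+ // so the monitor knows which script our funds will close to.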
+ let monitor_update = if update_shutdown_script {
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ }],
+ };
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ self.push_ret_blockable_mon_update(monitor_update)
+ } else { None };
+ let shutdown = if send_shutdown {
+ Some(msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ })
+ } else { None };
+
+ // We can't send our shutdown until we've committed all of our pending HTLCs, but the
+ // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
+ // cell HTLCs and return them to fail the payment.
+ self.context.holding_cell_update_fee = None;
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
+ self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
+ dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
+ false
+ },
+ _ => true
+ }
+ });
+
+ self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+ self.context.update_time_counter += 1;
+
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
+ }
+
+ fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
+ let mut tx = closing_tx.trust().built_transaction().clone();
+
+ tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
+
+ let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
+ let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
+ let mut holder_sig = sig.serialize_der().to_vec();
+ holder_sig.push(EcdsaSighashType::All as u8);
+ let mut cp_sig = counterparty_sig.serialize_der().to_vec();
+ cp_sig.push(EcdsaSighashType::All as u8);
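+ // BOLT 3 orders the two funding pubkeys lexicographically in the funding redeemscript, so
+ // the witness signatures must be pushed in that same order.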
+ if funding_key[..] < counterparty_funding_key[..] {
+ tx.input[0].witness.push(holder_sig);
+ tx.input[0].witness.push(cp_sig);
+ } else {
+ tx.input[0].witness.push(cp_sig);
+ tx.input[0].witness.push(holder_sig);
+ }
+
+ tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
+ tx
+ }
+
+ pub fn closing_signed<F: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
+ where F::Target: FeeEstimator
+ {
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
+ return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
+ }
+ if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
+ return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
+ }
+ if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
+ return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
+ }
+
+ if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
+ return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
+ }
+
+ if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
+ self.context.pending_counterparty_closing_signed = Some(msg.clone());
+ return Ok((None, None, None));
+ }
+
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
+ if used_total_fee != msg.fee_satoshis {
+ return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
+ }
+ let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
+
+ match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ Ok(_) => {},
+ Err(_e) => {
+ // The remote end may have decided to revoke their output due to inconsistent dust
+ // limits, so check for that case by re-checking the signature here.
+ closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
+ let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
+ secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
+ },
+ };
+
+ for outp in closing_tx.trust().built_transaction().output.iter() {
+ if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
+ }
+ }
+
+ assert!(self.context.shutdown_scriptpubkey.is_some());
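+ // If the counterparty accepted the exact fee we last proposed, negotiation is complete:
+ // build the fully-signed closing transaction and mark the channel shut down.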
+ if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
+ if last_fee == msg.fee_satoshis {
+ let shutdown_result = ShutdownResult {
+ monitor_update: None,
+ dropped_outbound_htlcs: Vec::new(),
+ unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ };
+ let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.update_time_counter += 1;
+ return Ok((None, Some(tx), Some(shutdown_result)));
+ }
+ }
+
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
+
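+ // Signs and returns our next closing_signed at `$new_fee`, and, if that fee matches the
+ // counterparty's proposal, also completes the shutdown with the fully-signed transaction.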
+ macro_rules! propose_fee {
+ ($new_fee: expr) => {
+ let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
+ (closing_tx, $new_fee)
+ } else {
+ self.build_closing_transaction($new_fee, false)
+ };
+
+ return match &self.context.holder_signer {
+ ChannelSignerType::Ecdsa(ecdsa) => {
+ let sig = ecdsa
+ .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
+ let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
+ let shutdown_result = ShutdownResult {
+ monitor_update: None,
+ dropped_outbound_htlcs: Vec::new(),
+ unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
+ };
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.update_time_counter += 1;
+ let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
+ (Some(tx), Some(shutdown_result))
+ } else {
+ (None, None)
+ };
+
+ self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
+ Ok((Some(msgs::ClosingSigned {
+ channel_id: self.context.channel_id,
+ fee_satoshis: used_fee,
+ signature: sig,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), signed_tx, shutdown_result))
+ }
+ }
+ }
+ }
+
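+ // Modern, fee_range-based negotiation: sanity-check their range against ours, then pick a
+ // fee in the overlap (the fundee pushes toward the high end since the funder pays the fee).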
+ if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
+ if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
+ return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
+ }
+ if max_fee_satoshis < our_min_fee {
+ return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
+ }
+ if min_fee_satoshis > our_max_fee {
+ return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
+ }
+
+ if !self.context.is_outbound() {
+ // They have to pay, so pick the highest fee in the overlapping range.
+ // We should never set an upper bound aside from their full balance
+ debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
+ propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
+ } else {
+ if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
+ return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
+ msg.fee_satoshis, our_min_fee, our_max_fee)));
+ }
+ // The proposed fee is in our acceptable range, accept it and broadcast!
+ propose_fee!(msg.fee_satoshis);
+ }
+ } else {
+ // Old fee style negotiation. We don't bother to enforce whether they are complying
+ // with the "making progress" requirements, we just comply and hope for the best.
+ if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
+ if msg.fee_satoshis > last_fee {
+ if msg.fee_satoshis < our_max_fee {
+ propose_fee!(msg.fee_satoshis);
+ } else if last_fee < our_max_fee {
+ propose_fee!(our_max_fee);
+ } else {
+ return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
+ }
+ } else {
+ if msg.fee_satoshis > our_min_fee {
+ propose_fee!(msg.fee_satoshis);
+ } else if last_fee > our_min_fee {
+ propose_fee!(our_min_fee);
+ } else {
+ return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
+ }
+ }
+ } else {
+ if msg.fee_satoshis < our_min_fee {
+ propose_fee!(our_min_fee);
+ } else if msg.fee_satoshis > our_max_fee {
+ propose_fee!(our_max_fee);
+ } else {
+ propose_fee!(msg.fee_satoshis);
+ }
+ }
+ }
+ }
+
+ fn internal_htlc_satisfies_config(
+ &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
+ ) -> Result<(), (&'static str, u16)> {
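+ // The required forwarding fee is base-plus-proportional:
+ // fee_msat = forwarding_fee_base_msat + amt_to_forward * forwarding_fee_proportional_millionths / 1_000_000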
+ let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
+ .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
+ if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
+ (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
+ return Err((
+ "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
+ 0x1000 | 12, // fee_insufficient
+ ));
+ }
+ if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
+ return Err((
+ "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
+ 0x1000 | 13, // incorrect_cltv_expiry
+ ));
+ }
+ Ok(())
+ }
+
+ /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
+ /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
+ /// unsuccessful, falls back to the previous one if one exists.
+ pub fn htlc_satisfies_config(
+ &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
+ ) -> Result<(), (&'static str, u16)> {
+ self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
+ .or_else(|err| {
+ if let Some(prev_config) = self.context.prev_config() {
+ self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
+ } else {
+ Err(err)
+ }
+ })
+ }
+
+ pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_holder_commitment_transaction_number + 1
+ }
+
+ pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
+ }
+
+ pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
+ self.context.cur_counterparty_commitment_transaction_number + 2
+ }
+
+ #[cfg(test)]
+ pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
+ &self.context.holder_signer
+ }
+
+ #[cfg(test)]
+ pub fn get_value_stat(&self) -> ChannelValueStat {
+ ChannelValueStat {
+ value_to_self_msat: self.context.value_to_self_msat,
+ channel_value_msat: self.context.channel_value_satoshis * 1000,
+ channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
+ pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
+ pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
+ holding_cell_outbound_amount_msat: {
+ let mut res = 0;
+ for h in self.context.holding_cell_htlc_updates.iter() {
+ match h {
+ &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
+ res += amount_msat;
+ }
+ _ => {}
+ }
+ }
+ res
+ },
+ counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
+ counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
+ }
+ }
+
+ /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
+ /// Allowed in any state (including after shutdown)
+ pub fn is_awaiting_monitor_update(&self) -> bool {
+ (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
+ }
+
+ /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
+ pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
+ if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
+ self.context.blocked_monitor_updates[0].update.update_id - 1
+ }
+
+ /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
+ /// further blocked monitor update exists after the next.
+ pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
+ if self.context.blocked_monitor_updates.is_empty() { return None; }
+ Some((self.context.blocked_monitor_updates.remove(0).update,
+ !self.context.blocked_monitor_updates.is_empty()))
+ }
+
+ /// Pushes a new monitor update into our monitor update queue, returning it if it should be
+ /// immediately given to the user for persisting or `None` if it should be held as blocked.
+ fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
+ -> Option<ChannelMonitorUpdate> {
+ let release_monitor = self.context.blocked_monitor_updates.is_empty();
+ if !release_monitor {
+ self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
+ update,
+ });
+ None
+ } else {
+ Some(update)
+ }
+ }
+
+ pub fn blocked_monitor_updates_pending(&self) -> usize {
+ self.context.blocked_monitor_updates.len()
+ }
+
+ /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
+ /// If the channel is outbound, this implies we have not yet broadcasted the funding
+ /// transaction. If the channel is inbound, this implies simply that the channel has not
+ /// advanced state.
+ pub fn is_awaiting_initial_mon_persist(&self) -> bool {
+ if !self.is_awaiting_monitor_update() { return false; }
+ if self.context.channel_state &
+ !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
+ == ChannelState::FundingSent as u32 {
+ // If we're not a 0conf channel, we'll be waiting on a monitor update with only
+ // FundingSent set, though our peer could have sent their channel_ready.
+ debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
+ return true;
+ }
+ if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
+ self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
+ // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
+ // waiting for the initial monitor persistence. Thus, we check if our commitment
+ // transaction numbers have both been iterated only exactly once (for the
+ // funding_signed), and we're awaiting monitor update.
+ //
+ // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
+ // only way to get an awaiting-monitor-update state during initial funding is if the
+ // initial monitor persistence is still pending).
+ //
+ // Because deciding we're awaiting initial broadcast spuriously could result in
+ // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
+ // we hard-assert here, even in production builds.
+ if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
+ assert!(self.context.monitor_pending_channel_ready);
+ assert_eq!(self.context.latest_monitor_update_id, 0);
+ return true;
+ }
+ false
+ }
+
+ /// Returns true if our channel_ready has been sent
+ pub fn is_our_channel_ready(&self) -> bool {
+ (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
+ }
+
+ /// Returns true if our peer has either initiated or agreed to shut down the channel.
+ pub fn received_shutdown(&self) -> bool {
+ (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
+ }
+
+ /// Returns true if we either initiated or agreed to shut down the channel.
+ pub fn sent_shutdown(&self) -> bool {
+ (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
+ }
+
+ /// Returns true if this channel is fully shut down. True here implies that no further actions
+ /// may/will be taken on this channel, and thus this object should be freed. Any future changes
+ /// will be handled appropriately by the chain monitor.
+ pub fn is_shutdown(&self) -> bool {
+ if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
+ assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
+ true
+ } else { false }
+ }
+
+ pub fn channel_update_status(&self) -> ChannelUpdateStatus {
+ self.context.channel_update_status
+ }
+
+ pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
+ self.context.update_time_counter += 1;
+ self.context.channel_update_status = status;
+ }
+
+ fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
+ // Called:
+ // * always when a new block/transactions are confirmed with the new height
+ // * when funding is signed with a height of 0
+ if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
+ return None;
+ }
+
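+ // If the best height has fallen below the height at which the funding transaction
+ // confirmed, it has been reorged out; reset the confirmation height so we wait for it to
+ // confirm again.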
+ let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
+ if funding_tx_confirmations <= 0 {
+ self.context.funding_tx_confirmation_height = 0;
+ }
+
+ if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
+ return None;
+ }
+
+ // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
+ // channel_ready until the entire batch is ready.
+ let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
+ let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
+ self.context.channel_state |= ChannelState::OurChannelReady as u32;
+ true
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
+ self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+ self.context.update_time_counter += 1;
+ true
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+ // We got a reorg but not enough to trigger a force close, just ignore.
+ false
+ } else {
+ if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
+ // We should never see a funding transaction on-chain until we've received
+ // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
+ // an inbound channel - before that we have no known funding TXID). The fuzzer,
+ // however, may do this and we shouldn't treat it as a bug.
+ #[cfg(not(fuzzing))]
+ panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
+ Do NOT broadcast a funding transaction manually - let LDK do it for you!",
+ self.context.channel_state);
+ }
+ // We got a reorg but not enough to trigger a force close, just ignore.
+ false
+ };
+
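+ // If a monitor update is in flight we defer sending channel_ready until it completes, and
+ // if the peer is disconnected the message will instead be (re)sent via channel_reestablish.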
+ if need_commitment_update {
+ if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ let next_per_commitment_point =
+ self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
+ return Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id,
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ });
+ }
+ } else {
+ self.context.monitor_pending_channel_ready = true;
+ }
+ }
+ None
+ }
+
+ /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
+ /// In the first case, we store the confirmation height and calculate the short channel id.
+ /// In the second, we simply return an Err indicating we need to be force-closed now.
+ pub fn transactions_confirmed<NS: Deref, L: Deref>(
+ &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
+ chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
+ ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ let mut msgs = (None, None);
+ if let Some(funding_txo) = self.context.get_funding_txo() {
+ for &(index_in_block, tx) in txdata.iter() {
+ // Check if the transaction is the expected funding transaction, and if it is,
+ // check that it pays the right amount to the right script.
+ if self.context.funding_tx_confirmation_height == 0 {
+ if tx.txid() == funding_txo.txid {
+ let txo_idx = funding_txo.index as usize;
+ if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
+ tx.output[txo_idx].value != self.context.channel_value_satoshis {
+ if self.context.is_outbound() {
+ // If we generated the funding transaction and it doesn't match what it
+ // should, the client is really broken and we should just panic and
+ // tell them off. That said, because hash collisions happen with high
+ // probability in fuzzing mode, if we're fuzzing we just close the
+ // channel and move on.
+ #[cfg(not(fuzzing))]
+ panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+ }
+ self.context.update_time_counter += 1;
+ let err_reason = "funding tx had wrong script/value or output index";
+ return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
+ } else {
+ if self.context.is_outbound() {
+ if !tx.is_coin_base() {
+ for input in tx.input.iter() {
+ if input.witness.is_empty() {
+ // We generated a malleable funding transaction, implying we've
+ // just exposed ourselves to funds loss to our counterparty.
+ #[cfg(not(fuzzing))]
+ panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
+ }
+ }
+ }
+ }
+ self.context.funding_tx_confirmation_height = height;
+ self.context.funding_tx_confirmed_in = Some(*block_hash);
+ self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
+ Ok(scid) => Some(scid),
+ Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
+ }
+ }
+ // If this is a coinbase transaction and not a 0-conf channel
+ // we should update our min_depth to 100 to handle coinbase maturity
+ if tx.is_coin_base() &&
+ self.context.minimum_depth.unwrap_or(0) > 0 &&
+ self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
+ self.context.minimum_depth = Some(COINBASE_MATURITY);
+ }
+ }
+ // If we allow 1-conf funding, we may need to check for channel_ready here and
+ // send it immediately instead of waiting for a best_block_updated call (which
+ // may have already happened for this block).
+ if let Some(channel_ready) = self.check_get_channel_ready(height) {
+ log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
+ let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
+ msgs = (Some(channel_ready), announcement_sigs);
+ }
+ }
+ for inp in tx.input.iter() {
+ if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
+ log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
+ return Err(ClosureReason::CommitmentTxConfirmed);
+ }
+ }
+ }
+ }
+ Ok(msgs)
+ }
+
+ /// When a new block is connected, we check the height of the block against outbound holding
+ /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
+ /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
+ /// handled by the ChannelMonitor.
+ ///
+ /// If we return Err, the channel may have been closed, at which point the standard
+ /// requirements apply - no calls may be made except those explicitly stated to be allowed
+ /// post-shutdown.
+ ///
+ /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
+ /// back.
+ pub fn best_block_updated<NS: Deref, L: Deref>(
+ &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
+ node_signer: &NS, user_config: &UserConfig, logger: &L
+ ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
+ }
+
+ fn do_best_block_updated<NS: Deref, L: Deref>(
+ &mut self, height: u32, highest_header_time: u32,
+ chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
+ ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ let mut timed_out_htlcs = Vec::new();
+ // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
+ // forward an HTLC when our counterparty should almost certainly just fail it for expiring
+ // ~now.
+ let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
+ self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
+ if *cltv_expiry <= unforwarded_htlc_cltv_limit {
+ timed_out_htlcs.push((source.clone(), payment_hash.clone()));
+ false
+ } else { true }
+ },
+ _ => true
+ }
+ });
+
+ self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
+
+ if let Some(channel_ready) = self.check_get_channel_ready(height) {
+ let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
+ self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
+ } else { None };
+ log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
+ return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
+ }
+
+ let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
+ if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
+ (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
+ let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
+ if self.context.funding_tx_confirmation_height == 0 {
+ // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
+ // zero if it has been reorged out, however in either case, our state flags
+ // indicate we've already sent a channel_ready
+ funding_tx_confirmations = 0;
+ }
+
+ // If we've sent channel_ready (or have both sent and received channel_ready), and
+ // the funding transaction has become unconfirmed,
+ // close the channel and hope we can get the latest state on chain (because presumably
+ // the funding transaction is at least still in the mempool of most nodes).
+ //
+ // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
+ // 0-conf channel, but not doing so may lead to the
+ // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
+ // to.
+ if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
+ let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
+ self.context.minimum_depth.unwrap(), funding_tx_confirmations);
+ return Err(ClosureReason::ProcessingError { err: err_reason });
+ }
+ } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
+ height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
+ log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
+ // If funding_tx_confirmed_in is unset, the channel must not be active
+ assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
+ assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
+ return Err(ClosureReason::FundingTimedOut);
+ }
+
+ let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
+ self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
+ } else { None };
+ Ok((None, timed_out_htlcs, announcement_sigs))
+ }
+
+ /// Indicates the funding transaction is no longer confirmed in the main chain. This may
+ /// force-close the channel, but may also indicate a harmless reorganization of a block or two
+ /// before the channel has reached channel_ready and we can just wait for more blocks.
+ pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
+ if self.context.funding_tx_confirmation_height != 0 {
+ // We handle the funding disconnection by calling best_block_updated with a height one
+ // below where our funding was connected, implying a reorg back to conf_height - 1.
+ let reorg_height = self.context.funding_tx_confirmation_height - 1;
+ // We use the time field to bump the current time we set on channel updates if it's
+ // larger. If we don't know that time has moved forward, we can just set it to the last
+ // time we saw and it will be ignored.
+ let best_time = self.context.update_time_counter;
+ match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
+ Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
+ assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
+ assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
+ assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
+ Ok(())
+ },
+ Err(e) => Err(e)
+ }
+ } else {
+ // We never learned about the funding confirmation anyway, just ignore
+ Ok(())
+ }
+ }
+
+ // Methods to get unprompted messages to send to the remote end (or where we already returned
+ // something in the handler for the message that prompted this message):
+
+ /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
+ /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
+ /// directions). Should be used for both broadcasted announcements and in response to an
+ /// AnnouncementSignatures message from the remote peer.
+ ///
+ /// Will only fail if we're not in a state where channel_announcement may be sent (including
+ /// closing).
+ ///
+ /// This will only return ChannelError::Ignore upon failure.
+ ///
+ /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
+ fn get_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
+ ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ if !self.context.config.announced_channel {
+ return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
+ }
+ if !self.context.is_usable() {
+ return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
+ }
+
+ let short_channel_id = self.context.get_short_channel_id()
+ .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
+ let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+ .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
+ let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
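+ // BOLT 7 orders the announcement fields by node_id; we're node_1 if our key sorts first.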
+ let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();