Ok(())
}
- /// Creates a new channel from a remote sides' request for one.
- /// Assumes chain_hash has already been checked and corresponds with what we expect!
- pub fn new_from_req<ES: Deref, SP: Deref, F: Deref, L: Deref>(
- fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
- counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
- their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
- current_chain_height: u32, logger: &L, outbound_scid_alias: u64
- ) -> Result<Channel<Signer>, ChannelError>
- where ES::Target: EntropySource,
- SP::Target: SignerProvider<Signer = Signer>,
- F::Target: FeeEstimator,
- L::Target: Logger,
- {
- let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
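+ /// Returns the scriptpubkey to which our balance will be sent on cooperative close.
+ /// Panics if no shutdown scriptpubkey has been set yet (see the comment in the body).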
+ #[inline]
+ fn get_closing_scriptpubkey(&self) -> Script {
+ // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
+ // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
+ // outside of those situations will panic.
+ self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+ }
- // First check the channel type is known, failing before we do anything else if we don't
- // support this channel type.
- let channel_type = if let Some(channel_type) = &msg.channel_type {
- if channel_type.supports_any_optional_bits() {
- return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
- }
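+ /// Estimates the weight of a closing transaction spending the funding output, including the
+ /// optional holder (`a_scriptpubkey`) and counterparty (`b_scriptpubkey`) outputs. As a
+ /// worked example, with the standard 71-byte 2-of-2 funding redeemscript and two P2WPKH
+ /// (22-byte) outputs this sums to 51*4 + 7 + 71 + 2*72 + 2*(8 + 1 + 22)*4 = 674 weight units.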
+ #[inline]
+ fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
+ let mut ret =
+ (4 + // version
+ 1 + // input count
+ 36 + // prevout
+ 1 + // script length (0)
+ 4 + // sequence
+ 1 + // output count
+ 4 // lock time
+ )*4 + // * 4 for non-witness parts
+ 2 + // witness marker and flag
+ 1 + // witness element count
+ 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
+ self.context.get_funding_redeemscript().len() as u64 + // funding witness script
+ 2*(1 + 71); // two signatures + sighash type flags
+ if let Some(spk) = a_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ if let Some(spk) = b_scriptpubkey {
+ ret += ((8+1) + // output values and script length
+ spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ }
+ ret
+ }
- // We only support the channel types defined by the `ChannelManager` in
- // `provided_channel_type_features`. The channel type must always support
- // `static_remote_key`.
- if !channel_type.requires_static_remote_key() {
- return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
- }
- // Make sure we support all of the features behind the channel type.
- if !channel_type.is_subset(our_supported_features) {
- return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
- }
- if channel_type.requires_scid_privacy() && announced_channel {
- return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
- }
- channel_type.clone()
- } else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
- }
- channel_type
- };
- let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
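+ /// Builds a closing transaction paying `proposed_total_fee_satoshis` out of the funder's
+ /// balance, trimming either output that is at or below our dust limit (and dropping the
+ /// counterparty's output entirely if `skip_remote_output` is set). If the funder cannot cover
+ /// the proposed fee, the shortfall is added to the fee. May only be called once all pending
+ /// HTLCs and any pending fee update have been resolved. Returns the transaction and the total
+ /// fee actually paid.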
+ #[inline]
+ fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
+ assert!(self.context.pending_inbound_htlcs.is_empty());
+ assert!(self.context.pending_outbound_htlcs.is_empty());
+ assert!(self.context.pending_update_fee.is_none());
- let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
- let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
- let pubkeys = holder_signer.pubkeys().clone();
- let counterparty_pubkeys = ChannelPublicKeys {
- funding_pubkey: msg.funding_pubkey,
- revocation_basepoint: msg.revocation_basepoint,
- payment_point: msg.payment_point,
- delayed_payment_basepoint: msg.delayed_payment_basepoint,
- htlc_basepoint: msg.htlc_basepoint
- };
+ let mut total_fee_satoshis = proposed_total_fee_satoshis;
+ let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
+ let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
- if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
- return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
+ if value_to_holder < 0 {
+ assert!(self.context.is_outbound());
+ total_fee_satoshis += (-value_to_holder) as u64;
+ } else if value_to_counterparty < 0 {
+ assert!(!self.context.is_outbound());
+ total_fee_satoshis += (-value_to_counterparty) as u64;
}
- // Check sanity of message fields:
- if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
- return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
- }
- if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
- return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
- }
- if msg.channel_reserve_satoshis > msg.funding_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
- }
- let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
- if msg.push_msat > full_channel_value_msat {
- return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
- }
- if msg.dust_limit_satoshis > msg.funding_satoshis {
- return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
- }
- if msg.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
+ value_to_counterparty = 0;
}
- Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
- let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
- if msg.to_self_delay > max_counterparty_selected_contest_delay {
- return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
- }
- if msg.max_accepted_htlcs < 1 {
- return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
- }
- if msg.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+ if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
+ value_to_holder = 0;
}
- // Now check against optional parameters as set by config...
- if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
- return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
- }
- if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
- }
- if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
- }
- if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
- }
- if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
- }
- if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
- }
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ let holder_shutdown_script = self.get_closing_scriptpubkey();
+ let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
+ let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
- // Convert things into internal flags and prep our state:
+ let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
+ (closing_transaction, total_fee_satoshis)
+ }
- if config.channel_handshake_limits.force_announced_channel_preference {
- if config.channel_handshake_config.announced_channel != announced_channel {
- return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
- }
- }
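+ /// Returns the channel's funding transaction outpoint. Panics if the funding outpoint has
+ /// not yet been set.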
+ fn funding_outpoint(&self) -> OutPoint {
+ self.context.channel_transaction_parameters.funding_outpoint.unwrap()
+ }
- let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
- if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- // Protocol level safety check in place, although it should never happen because
- // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
- }
- if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
- msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
- }
- if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
- return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
+ /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
+ /// entirely.
+ ///
+ /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
+ /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
+ ///
+ /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
+ /// disconnected).
+ pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
+ (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
+ where L::Target: Logger {
+ // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
+ // (see equivalent if condition there).
+ assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
+ let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
+ let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
+ self.context.latest_monitor_update_id = mon_update_id;
+ if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
+ assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
}
+ }
- // check if the funder's amount for the initial commitment tx is sufficient
- // for full fee payment plus a few HTLCs to ensure the channel will be useful.
- let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
- let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
- if funders_amount_msat / 1000 < commitment_tx_fee {
- return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
+ fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
+ // Either ChannelReady got set (which means it won't be unset) or there is no way any
+ // caller thought we could have something claimed (because we wouldn't have accepted an
+ // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
+ // either.
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
}
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
- // While it's reasonable for us to not meet the channel reserve initially (if they don't
- // want to push much to us), our counterparty should always have more than our reserve.
- if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
- }
+ let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
- let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
- match &msg.shutdown_scriptpubkey {
- &Some(ref script) => {
- // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- if script.len() == 0 {
- None
- } else {
- if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
- }
- Some(script.clone())
- }
- },
- // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
- &None => {
- return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
- }
- }
- } else { None };
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
- let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
- match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => Some(scriptpubkey),
- Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
- }
- } else { None };
-
- if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
- if !shutdown_scriptpubkey.is_compatible(&their_features) {
- return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ let mut pending_idx = core::usize::MAX;
+ let mut htlc_value_msat = 0;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ assert_eq!(htlc.payment_hash, payment_hash_calc);
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ }
+ return UpdateFulfillFetch::DuplicateClaim {};
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ // Don't return in release mode here so that we can update channel_monitor
+ }
+ }
+ pending_idx = idx;
+ htlc_value_msat = htlc.amount_msat;
+ break;
}
}
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
+ // this is simply a duplicate claim, rather than a previously-failed HTLC (which would mean we lost funds).
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
- let destination_script = match signer_provider.get_destination_script() {
- Ok(script) => script,
- Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+ // Now update local state:
+ //
+ // We have to put the payment_preimage in the channel_monitor right away here to ensure we
+ // can claim it even if the channel hits the chain before we see their next commitment.
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
+ payment_preimage: payment_preimage_arg.clone(),
+ }],
};
- let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
-
- let chan = Channel {
- context: ChannelContext {
- user_id,
-
- config: LegacyChannelConfig {
- options: config.channel_config.clone(),
- announced_channel,
- commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
- },
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ // Note that this condition is the same as the assertion in
+ // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
+ // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
+ // do not get into this branch.
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ // Make sure we don't leave latest_monitor_update_id incremented here:
+ self.context.latest_monitor_update_id -= 1;
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return UpdateFulfillFetch::DuplicateClaim {};
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
+ // TODO: We may actually be able to switch to a fulfill here, though it's
+ // rare enough it may not be worth the complexity burden.
+ debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ },
+ _ => {}
+ }
+ }
+ log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
+ payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
+ });
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ #[cfg(any(test, fuzzing))]
+ self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
- prev_config: None,
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ if let InboundHTLCState::Committed = htlc.state {
+ } else {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ }
+ log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
+ }
- inbound_handshake_limits_override: None,
+ UpdateFulfillFetch::NewClaim {
+ monitor_update,
+ htlc_value_msat,
+ msg: Some(msgs::UpdateFulfillHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc_id_arg,
+ payment_preimage: payment_preimage_arg,
+ }),
+ }
+ }
- temporary_channel_id: Some(msg.temporary_channel_id),
- channel_id: msg.temporary_channel_id,
- channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
- announcement_sigs_state: AnnouncementSigsState::NotSent,
- secp_ctx,
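+ /// Claims an inbound HTLC as in `get_update_fulfill_htlc` above, additionally queueing the
+ /// preimage-bearing [`ChannelMonitorUpdate`] (plus a fresh commitment update when one can be
+ /// sent immediately) in `pending_monitor_updates` and returning a reference to the monitor
+ /// update that the caller must apply.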
+ pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
+ let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
+ match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
+ UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
+ // Even if we aren't supposed to let new monitor updates with commitment state
+ // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
+ // matter what. Sadly, to push a new monitor update which flies before others
+ // already queued, we have to insert it into the pending queue and update the
+ // update_ids of all the following monitors.
+ let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
+ // to be strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ self.context.pending_monitor_updates.len() - 1
+ } else {
+ let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
+ .unwrap_or(self.context.pending_monitor_updates.len());
+ let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
+ .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
+ monitor_update.update_id = new_mon_id;
+ self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
+ update: monitor_update, blocked: false,
+ });
+ for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
+ held_update.update.update_id += 1;
+ }
+ if msg.is_some() {
+ debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
+ let update = self.build_commitment_no_status_check(logger);
+ self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
+ update, blocked: true,
+ });
+ }
+ insert_pos
+ };
+ self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
+ UpdateFulfillCommitFetch::NewClaim {
+ monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
+ .expect("We just pushed the monitor update").update,
+ htlc_value_msat,
+ }
+ },
+ UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
+ }
+ }
- latest_monitor_update_id: 0,
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
+ -> Result<(), ChannelError> where L::Target: Logger {
+ self.fail_htlc(htlc_id_arg, err_packet, true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ }
- holder_signer,
- shutdown_scriptpubkey,
- destination_script,
+ /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
+ /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
+ /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
+ /// before we fail backwards.
+ ///
+ /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
+ /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
+ /// [`ChannelError::Ignore`].
+ fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
+ -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ panic!("Was asked to fail an HTLC when channel was not in an operational state");
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- value_to_self_msat: msg.push_msat,
+ // ChannelManager may generate duplicate claims/fails due to HTLC update events from
+ // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
+ // these, but for now we just have to treat them as normal.
- pending_inbound_htlcs: Vec::new(),
- pending_outbound_htlcs: Vec::new(),
- holding_cell_htlc_updates: Vec::new(),
- pending_update_fee: None,
- holding_cell_update_fee: None,
- next_holder_htlc_id: 0,
- next_counterparty_htlc_id: 0,
- update_time_counter: 1,
+ let mut pending_idx = core::usize::MAX;
+ for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
+ if htlc.htlc_id == htlc_id_arg {
+ match htlc.state {
+ InboundHTLCState::Committed => {},
+ InboundHTLCState::LocalRemoved(ref reason) => {
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ } else {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ }
+ return Ok(None);
+ },
+ _ => {
+ debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
+ return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
+ }
+ }
+ pending_idx = idx;
+ }
+ }
+ if pending_idx == core::usize::MAX {
+ #[cfg(any(test, fuzzing))]
+ // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
+ // is simply a duplicate fail, rather than a case where it was previously failed and we failed back too early.
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
- resend_order: RAACommitmentOrder::CommitmentFirst,
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ debug_assert!(force_holding_cell, "!force_holding_cell is only passed when emptying the holding cell, so we shouldn't end up back in it!");
+ force_holding_cell = true;
+ }
- monitor_pending_channel_ready: false,
- monitor_pending_revoke_and_ack: false,
- monitor_pending_commitment_signed: false,
- monitor_pending_forwards: Vec::new(),
- monitor_pending_failures: Vec::new(),
- monitor_pending_finalized_fulfills: Vec::new(),
+ // Now update local state:
+ if force_holding_cell {
+ for pending_update in self.context.holding_cell_htlc_updates.iter() {
+ match pending_update {
+ &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ #[cfg(any(test, fuzzing))]
+ debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
+ return Ok(None);
+ }
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
+ if htlc_id_arg == htlc_id {
+ debug_assert!(false, "Tried to fail an HTLC that was already failed");
+ return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
+ }
+ },
+ _ => {}
+ }
+ }
+ log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
+ htlc_id: htlc_id_arg,
+ err_packet,
+ });
+ return Ok(None);
+ }
- #[cfg(debug_assertions)]
- holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
- #[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
+ log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
+ {
+ let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
+ }
- last_sent_closing_fee: None,
- pending_counterparty_closing_signed: None,
- closing_fee_limits: None,
- target_closing_feerate_sats_per_kw: None,
+ Ok(Some(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc_id_arg,
+ reason: err_packet
+ }))
+ }
- inbound_awaiting_accept: true,
+ // Message handlers:
- funding_tx_confirmed_in: None,
- funding_tx_confirmation_height: 0,
- short_channel_id: None,
- channel_creation_height: current_chain_height,
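+ /// Handles the counterparty's `accept_channel` message on an outbound channel, validating
+ /// its fields against our handshake limits (or any per-channel override) and, on success,
+ /// storing the counterparty's channel parameters and keys.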
+ pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
+ let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
- feerate_per_kw: msg.feerate_per_kw,
- channel_value_satoshis: msg.funding_satoshis,
- counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
- holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
- counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
- holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
- counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
- holder_selected_channel_reserve_satoshis,
- counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
- holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
- counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
- holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
- minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
+ // Check sanity of message fields:
+ if !self.context.is_outbound() {
+ return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
+ }
+ if self.context.channel_state != ChannelState::OurInitSent as u32 {
+ return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
+ }
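+ // A dust limit larger than the total bitcoin supply (21e6 BTC * 1e8 sat) is nonsensical.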
+ if msg.dust_limit_satoshis > 21000000 * 100000000 {
+ return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
+ }
+ if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
+ return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
+ }
+ if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
+ }
+ if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+ msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
+ }
+ let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
+ if msg.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ }
+ let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ if msg.to_self_delay > max_delay_acceptable {
+ return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
+ }
+ if msg.max_accepted_htlcs < 1 {
+ return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ }
+ if msg.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+ }
- counterparty_forwarding_info: None,
+ // Now check against optional parameters as set by config...
+ if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+ }
+ if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+ }
+ if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+ }
+ if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+ }
+ if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if msg.minimum_depth > peer_limits.max_minimum_depth {
+ return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
+ }
- channel_transaction_parameters: ChannelTransactionParameters {
- holder_pubkeys: pubkeys,
- holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
- is_outbound_from_holder: false,
- counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
- selected_contest_delay: msg.to_self_delay,
- pubkeys: counterparty_pubkeys,
- }),
- funding_outpoint: None,
- opt_anchors: if opt_anchors { Some(()) } else { None },
- opt_non_zero_fee_anchors: None
- },
- funding_transaction: None,
+ if let Some(ty) = &msg.channel_type {
+ if *ty != self.context.channel_type {
+ return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+ }
+ } else if their_features.supports_channel_type() {
+ // Assume they've accepted the channel type as they said they understand it.
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ self.context.channel_type = channel_type;
+ }
- counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
- counterparty_prev_commitment_point: None,
- counterparty_node_id,
+ let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+ match &msg.shutdown_scriptpubkey {
+ &Some(ref script) => {
+ // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+ if script.len() == 0 {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+ }
+ Some(script.clone())
+ }
+ },
+ // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy, so we fail the channel.
+ &None => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
- counterparty_shutdown_scriptpubkey,
+ self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
+ self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
+ self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
+ self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
+ self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
- commitment_secrets: CounterpartyCommitmentSecrets::new(),
+ if peer_limits.trust_own_funding_0conf {
+ self.context.minimum_depth = Some(msg.minimum_depth);
+ } else {
+ self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
+ }
- channel_update_status: ChannelUpdateStatus::Enabled,
- closing_signed_in_flight: false,
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.funding_pubkey,
+ revocation_basepoint: msg.revocation_basepoint,
+ payment_point: msg.payment_point,
+ delayed_payment_basepoint: msg.delayed_payment_basepoint,
+ htlc_basepoint: msg.htlc_basepoint
+ };
- announcement_sigs: None,
+ self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ });
- #[cfg(any(test, fuzzing))]
- next_local_commitment_tx_fee_info_cached: Mutex::new(None),
- #[cfg(any(test, fuzzing))]
- next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
+ self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
+ self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
- workaround_lnd_bug_4006: None,
- sent_message_awaiting_response: None,
+ self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
+ self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
- latest_inbound_scid_alias: None,
- outbound_scid_alias,
+ Ok(())
+ }
- channel_pending_event_emitted: false,
- channel_ready_event_emitted: false,
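+ /// Verifies the counterparty's signature (from their `funding_created`) on our initial
+ /// commitment transaction and signs their initial commitment transaction in return. Returns
+ /// the counterparty's initial commitment txid, our initial commitment transaction, and our
+ /// signature on theirs.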
+ fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
+ let funding_script = self.context.get_funding_redeemscript();
- #[cfg(any(test, fuzzing))]
- historical_inbound_htlc_fulfills: HashSet::new(),
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
+ {
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ // They sign the holder commitment transaction...
+ log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
+ log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
+ encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
+ encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+ secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
+ }
- channel_type,
- channel_keys_id,
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- pending_monitor_updates: Vec::new(),
- }
- };
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
- Ok(chan)
- }
+ let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
- #[inline]
- fn get_closing_scriptpubkey(&self) -> Script {
- // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
- // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
- // outside of those situations will fail.
- self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
+ // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
+ Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
}
- #[inline]
- fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
- let mut ret =
- (4 + // version
- 1 + // input count
- 36 + // prevout
- 1 + // script length (0)
- 4 + // sequence
- 1 + // output count
- 4 // lock time
- )*4 + // * 4 for non-witness parts
- 2 + // witness marker and flag
- 1 + // witness element count
- 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
- self.context.get_funding_redeemscript().len() as u64 + // funding witness script
- 2*(1 + 71); // two signatures + sighash type flags
- if let Some(spk) = a_scriptpubkey {
- ret += ((8+1) + // output values and script length
- spk.len() as u64) * 4; // scriptpubkey and witness multiplier
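+ /// Handles a `funding_created` message on an inbound channel: records the funding outpoint,
+ /// verifies the counterparty's commitment signature, and on success returns the
+ /// `funding_signed` message to send back along with the initial [`ChannelMonitor`] that must
+ /// be persisted and given to the chain watcher.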
+ pub fn funding_created<SP: Deref, L: Deref>(
+ &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
+ ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
+ where
+ SP::Target: SignerProvider<Signer = Signer>,
+ L::Target: Logger
+ {
+ if self.context.is_outbound() {
+ return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
}
- if let Some(spk) = b_scriptpubkey {
- ret += ((8+1) + // output values and script length
- spk.len() as u64) * 4; // scriptpubkey and witness multiplier
+ if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+ // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
+ // remember the channel, so it's safe to just send an error_message here and drop the
+ // channel.
+ return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
+ }
+ if self.context.inbound_awaiting_accept {
+ return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
+ }
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
- ret
- }
- #[inline]
- fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
- assert!(self.context.pending_inbound_htlcs.is_empty());
- assert!(self.context.pending_outbound_htlcs.is_empty());
- assert!(self.context.pending_update_fee.is_none());
+ let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
+ self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+ // This is an externally observable change before we finish all our checks. In particular
+ // funding_created_signature may fail.
+ self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
- let mut total_fee_satoshis = proposed_total_fee_satoshis;
- let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
- let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
+ let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
+ Ok(res) => res,
+ Err(ChannelError::Close(e)) => {
+ self.context.channel_transaction_parameters.funding_outpoint = None;
+ return Err(ChannelError::Close(e));
+ },
+ Err(e) => {
+ // The only error we know how to handle is ChannelError::Close, so we fall over here
+ // to make sure we don't continue with an inconsistent state.
+ panic!("unexpected error type from funding_created_signature {:?}", e);
+ }
+ };
- if value_to_holder < 0 {
- assert!(self.context.is_outbound());
- total_fee_satoshis += (-value_to_holder) as u64;
- } else if value_to_counterparty < 0 {
- assert!(!self.context.is_outbound());
- total_fee_satoshis += (-value_to_counterparty) as u64;
- }
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
- if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
- value_to_counterparty = 0;
- }
+ self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
+ .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
- if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
- value_to_holder = 0;
- }
+ // Now that we're past error-generating stuff, update our local state:
- assert!(self.context.shutdown_scriptpubkey.is_some());
- let holder_shutdown_script = self.get_closing_scriptpubkey();
- let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
- let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+ let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+ monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
+ &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
+ &self.context.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.context.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx, best_block, self.context.counterparty_node_id);
- let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
- (closing_transaction, total_fee_satoshis)
- }
+ channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
- fn funding_outpoint(&self) -> OutPoint {
- self.context.channel_transaction_parameters.funding_outpoint.unwrap()
- }
+ self.context.channel_state = ChannelState::FundingSent as u32;
+ self.context.channel_id = funding_txo.to_channel_id();
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+ self.context.cur_holder_commitment_transaction_number -= 1;
- /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
- /// entirely.
- ///
- /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
- /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
- ///
- /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
- /// disconnected).
- pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
- (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
- where L::Target: Logger {
- // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
- // (see equivalent if condition there).
- assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
- let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
- let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
- self.context.latest_monitor_update_id = mon_update_id;
- if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
- assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
- }
+ log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
+
+ let need_channel_ready = self.check_get_channel_ready(0).is_some();
+ self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+
+ Ok((msgs::FundingSigned {
+ channel_id: self.context.channel_id,
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ }, channel_monitor))
}
- fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
- // Either ChannelReady got set (which means it won't be unset) or there is no way any
- // caller thought we could have something claimed (cause we wouldn't have accepted in an
- // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
- // either.
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
+ /// Handles a funding_signed message from the remote end.
+ /// If this call is successful, broadcast the funding transaction (and not before!)
+ pub fn funding_signed<SP: Deref, L: Deref>(
+ &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
+ ) -> Result<ChannelMonitor<Signer>, ChannelError>
+ where
+ SP::Target: SignerProvider<Signer = Signer>,
+ L::Target: Logger
+ {
+ if !self.context.is_outbound() {
+ return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
+ }
+ if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
+ return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
+ }
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- let payment_hash_calc = PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner());
+ let funding_script = self.context.get_funding_redeemscript();
- // ChannelManager may generate duplicate claims/fails due to HTLC update events from
- // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
- // these, but for now we just have to treat them as normal.
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
- let mut pending_idx = core::usize::MAX;
- let mut htlc_value_msat = 0;
- for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
- if htlc.htlc_id == htlc_id_arg {
- assert_eq!(htlc.payment_hash, payment_hash_calc);
- match htlc.state {
- InboundHTLCState::Committed => {},
- InboundHTLCState::LocalRemoved(ref reason) => {
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
- } else {
- log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id()));
- debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
- }
- return UpdateFulfillFetch::DuplicateClaim {};
- },
- _ => {
- debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- // Don't return in release mode here so that we can update channel_monitor
- }
- }
- pending_idx = idx;
- htlc_value_msat = htlc.amount_msat;
- break;
+ log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
+ log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+ let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
+ {
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
}
}
- if pending_idx == core::usize::MAX {
- #[cfg(any(test, fuzzing))]
- // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
- // this is simply a duplicate claim, not previously failed and we lost funds.
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
- return UpdateFulfillFetch::DuplicateClaim {};
- }
- // Now update local state:
- //
- // We have to put the payment_preimage in the channel_monitor right away here to ensure we
- // can claim it even if the channel hits the chain before we see their next commitment.
- self.context.latest_monitor_update_id += 1;
- let monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
- payment_preimage: payment_preimage_arg.clone(),
- }],
- };
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.context.get_holder_pubkeys().funding_pubkey,
+ self.context.counterparty_funding_pubkey()
+ );
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
- // Note that this condition is the same as the assertion in
- // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
- // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
- // do not not get into this branch.
- for pending_update in self.context.holding_cell_htlc_updates.iter() {
- match pending_update {
- &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
- if htlc_id_arg == htlc_id {
- // Make sure we don't leave latest_monitor_update_id incremented here:
- self.context.latest_monitor_update_id -= 1;
- #[cfg(any(test, fuzzing))]
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
- return UpdateFulfillFetch::DuplicateClaim {};
- }
- },
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
- if htlc_id_arg == htlc_id {
- log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", log_bytes!(self.context.channel_id()));
- // TODO: We may actually be able to switch to a fulfill here, though its
- // rare enough it may not be worth the complexity burden.
- debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
- return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
- }
- },
- _ => {}
- }
- }
- log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", log_bytes!(self.context.channel_id()), self.context.channel_state);
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
- payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
- });
- #[cfg(any(test, fuzzing))]
- self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
- return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
+ .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let funding_txo = self.context.get_funding_txo().unwrap();
+ let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
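+ // Per BOLT 3, commitment numbers are obscured by a factor derived from both sides' payment
+ // points, hiding the number of channel updates from on-chain observers.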
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
+ let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
+ let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
+ monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
+ shutdown_script, self.context.get_holder_selected_contest_delay(),
+ &self.context.destination_script, (funding_txo, funding_txo_script),
+ &self.context.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.context.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx, best_block, self.context.counterparty_node_id);
+
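+ // Seed the new monitor with the initial counterparty commitment transaction so it can
+ // detect and react to that transaction if it ever confirms on-chain.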
+ channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
+
+ assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet, so nothing could have failed to update!
+ self.context.channel_state = ChannelState::FundingSent as u32;
+ self.context.cur_holder_commitment_transaction_number -= 1;
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
+
+ log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
+
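+ // If this was negotiated as a 0-conf channel we may already be able to move towards
+ // `channel_ready`; otherwise it goes out once the funding transaction confirms.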
+ let need_channel_ready = self.check_get_channel_ready(0).is_some();
+ self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ Ok(channel_monitor)
+ }
+
+ /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
+ /// and the channel is now usable (and public), this may generate an announcement_signatures to
+ /// reply with.
+ pub fn channel_ready<NS: Deref, L: Deref>(
+ &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
+ user_config: &UserConfig, best_block: &BestBlock, logger: &L
+ ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ self.context.workaround_lnd_bug_4006 = Some(msg.clone());
+ return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
}
- #[cfg(any(test, fuzzing))]
- self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
- {
- let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
- if let InboundHTLCState::Committed = htlc.state {
- } else {
- debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
+ if let Some(scid_alias) = msg.short_channel_id_alias {
+ if Some(scid_alias) != self.context.short_channel_id {
+ // The scid alias provided can be used to route payments *from* our counterparty,
+ // i.e. can be used for inbound payments and provided in invoices, but is not used
+ // when routing outbound payments.
+ self.context.latest_inbound_scid_alias = Some(scid_alias);
}
- log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
}
- UpdateFulfillFetch::NewClaim {
- monitor_update,
- htlc_value_msat,
- msg: Some(msgs::UpdateFulfillHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc_id_arg,
- payment_preimage: payment_preimage_arg,
- }),
- }
- }
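+ // Mask off the shutdown/disconnect/monitor flags so the funding state can be matched directly below.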
+ let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
- pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
- let release_cs_monitor = self.context.pending_monitor_updates.iter().all(|upd| !upd.blocked);
- match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
- UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
- // Even if we aren't supposed to let new monitor updates with commitment state
- // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
- // matter what. Sadly, to push a new monitor update which flies before others
- // already queued, we have to insert it into the pending queue and update the
- // update_ids of all the following monitors.
- let unblocked_update_pos = if release_cs_monitor && msg.is_some() {
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them
- // to be strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
- update: monitor_update, blocked: false,
- });
- self.context.pending_monitor_updates.len() - 1
+ if non_shutdown_state == ChannelState::FundingSent as u32 {
+ self.context.channel_state |= ChannelState::TheirChannelReady as u32;
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
+ self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
+ self.context.update_time_counter += 1;
+ } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
+ // If we reconnected before sending our `channel_ready` they may still resend theirs:
+ (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
+ (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
+ {
+ // They probably disconnected/reconnected and re-sent the channel_ready, which is
+ // required, or they're sending a fresh SCID alias.
+ let expected_point =
+ if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
+ // If they haven't ever sent an updated point, the point they send should match
+ // the current one.
+ self.context.counterparty_cur_commitment_point
+ } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
+ // If we've advanced the commitment number once, the second commitment point is
+ // at `counterparty_prev_commitment_point`, which is not yet revoked.
+ debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
+ self.context.counterparty_prev_commitment_point
} else {
- let insert_pos = self.context.pending_monitor_updates.iter().position(|upd| upd.blocked)
- .unwrap_or(self.context.pending_monitor_updates.len());
- let new_mon_id = self.context.pending_monitor_updates.get(insert_pos)
- .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
- monitor_update.update_id = new_mon_id;
- self.context.pending_monitor_updates.insert(insert_pos, PendingChannelMonitorUpdate {
- update: monitor_update, blocked: false,
- });
- for held_update in self.context.pending_monitor_updates.iter_mut().skip(insert_pos + 1) {
- held_update.update.update_id += 1;
- }
- if msg.is_some() {
- debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
- let update = self.build_commitment_no_status_check(logger);
- self.context.pending_monitor_updates.push(PendingChannelMonitorUpdate {
- update, blocked: true,
- });
- }
- insert_pos
+ // If they have sent updated points, channel_ready is always supposed to match
+ // their "first" point, which we re-derive here.
+ Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
+ &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
+ ).expect("We already advanced, so previous secret keys should have been validated already")))
};
- self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
- UpdateFulfillCommitFetch::NewClaim {
- monitor_update: &self.context.pending_monitor_updates.get(unblocked_update_pos)
- .expect("We just pushed the monitor update").update,
- htlc_value_msat,
- }
- },
- UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
+ if expected_point != Some(msg.next_per_commitment_point) {
+ return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+ }
+ return Ok(None);
+ } else {
+ return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
}
- }
- /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
- /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
- /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
- /// before we fail backwards.
- ///
- /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
- /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
- /// [`ChannelError::Ignore`].
- pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
- -> Result<(), ChannelError> where L::Target: Logger {
- self.fail_htlc(htlc_id_arg, err_packet, true, logger)
- .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
- }
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
- /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
- /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
- /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
- /// before we fail backwards.
- ///
- /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
- /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
- /// [`ChannelError::Ignore`].
- fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
- -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- panic!("Was asked to fail an HTLC when channel was not in an operational state");
- }
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
- // ChannelManager may generate duplicate claims/fails due to HTLC update events from
- // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
- // these, but for now we just have to treat them as normal.
+ Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
+ }
- let mut pending_idx = core::usize::MAX;
- for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
- if htlc.htlc_id == htlc_id_arg {
- match htlc.state {
- InboundHTLCState::Committed => {},
- InboundHTLCState::LocalRemoved(ref reason) => {
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
- } else {
- debug_assert!(false, "Tried to fail an HTLC that was already failed");
- }
- return Ok(None);
- },
- _ => {
- debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
- return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
- }
- }
- pending_idx = idx;
- }
- }
- if pending_idx == core::usize::MAX {
- #[cfg(any(test, fuzzing))]
- // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
- // is simply a duplicate fail, not previously failed and we failed-back too early.
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
- return Ok(None);
+ pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
+ where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
+ // We can't accept HTLCs sent after we've sent a shutdown.
+ let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
+ if local_sent_shutdown {
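+ // Fail it back with PERM|8 (permanent_channel_failure), as we have already sent our shutdown.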
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
}
-
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
- debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
- force_holding_cell = true;
+ // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
+ let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
+ if remote_sent_shutdown {
+ return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
}
-
- // Now update local state:
- if force_holding_cell {
- for pending_update in self.context.holding_cell_htlc_updates.iter() {
- match pending_update {
- &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
- if htlc_id_arg == htlc_id {
- #[cfg(any(test, fuzzing))]
- debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
- return Ok(None);
- }
- },
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
- if htlc_id_arg == htlc_id {
- debug_assert!(false, "Tried to fail an HTLC that was already failed");
- return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
- }
- },
- _ => {}
- }
- }
- log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
- htlc_id: htlc_id_arg,
- err_packet,
- });
- return Ok(None);
- }
-
- log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, log_bytes!(self.context.channel_id()));
- {
- let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
- }
-
- Ok(Some(msgs::UpdateFailHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc_id_arg,
- reason: err_packet
- }))
- }
-
- // Message handlers:
-
- pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
- let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
-
- // Check sanity of message fields:
- if !self.context.is_outbound() {
- return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
- }
- if self.context.channel_state != ChannelState::OurInitSent as u32 {
- return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
- }
- if msg.dust_limit_satoshis > 21000000 * 100000000 {
- return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
- }
- if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
- }
- if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
- }
- if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
- msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
- }
- let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
- if msg.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
}
- let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
- if msg.to_self_delay > max_delay_acceptable {
- return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
+ if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
+ return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
}
- if msg.max_accepted_htlcs < 1 {
- return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ if msg.amount_msat == 0 {
+ return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
}
- if msg.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+ if msg.amount_msat < self.context.holder_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
}
- // Now check against optional parameters as set by config...
- if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
- }
- if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
- }
- if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
- }
- if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
- return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
- }
- if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
+ return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
}
- if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
}
- if msg.minimum_depth > peer_limits.max_minimum_depth {
- return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
+ // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
+ // the reserve_satoshis we told them to always have as direct payment so that they lose
+ // something if we punish them for broadcasting an old state).
+ // Note that we don't really care about having a small/no to_remote output in our local
+ // commitment transactions, as the purpose of the channel reserve is to ensure we can
+ // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
+ // present in the next commitment transaction we send them (at least for fulfilled ones,
+ // failed ones won't modify value_to_self).
+ // Note that we will send HTLCs which another instance of rust-lightning would think
+ // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
+ // Channel state once they will not be present in the next received commitment
+ // transaction).
+ let mut removed_outbound_total_msat = 0;
+ for ref htlc in self.context.pending_outbound_htlcs.iter() {
+ if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
+ removed_outbound_total_msat += htlc.amount_msat;
+ } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
+ removed_outbound_total_msat += htlc.amount_msat;
+ }
}
- if let Some(ty) = &msg.channel_type {
- if *ty != self.context.channel_type {
- return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
- }
- } else if their_features.supports_channel_type() {
- // Assume they've accepted the channel type as they said they understand it.
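+ // With anchor outputs the HTLC-timeout/success transactions are zero-fee (fees are attached
+ // later via CPFP), so only the dust limits themselves matter; otherwise the second-stage
+ // transaction fee must be added before comparing against the dust limits.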
+ let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
+ (0, 0)
} else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
+ (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
+ dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
+ };
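+ // HTLCs below the counterparty's dust limit plus this fee have no output on their commitment
+ // transaction; their value simply goes to fees on a force-close, so bound the total value we
+ // are willing to expose this way.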
+ let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
+ let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
+ if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
+ on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
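+ // UPDATE|7 (temporary_channel_failure) tells the sender to retry, e.g. over another channel.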
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
- self.context.channel_type = channel_type;
}
- let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
- match &msg.shutdown_scriptpubkey {
- &Some(ref script) => {
- // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- if script.len() == 0 {
- None
- } else {
- if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
- }
- Some(script.clone())
- }
- },
- // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
- &None => {
- return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
- }
+ let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
+ if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
+ let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
+ if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
+ log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
+ on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
}
- } else { None };
-
- self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
- self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
- self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
- self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
- self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
+ }
- if peer_limits.trust_own_funding_0conf {
- self.context.minimum_depth = Some(msg.minimum_depth);
- } else {
- self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
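+ // Compute the counterparty's remaining balance: the channel value minus our balance, where
+ // pending inbound HTLCs count towards us and fulfilled outbound HTLCs count towards them.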
+ let pending_value_to_self_msat =
+ self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
+ let pending_remote_value_msat =
+ self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
+ if pending_remote_value_msat < msg.amount_msat {
+ return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
}
- let counterparty_pubkeys = ChannelPublicKeys {
- funding_pubkey: msg.funding_pubkey,
- revocation_basepoint: msg.revocation_basepoint,
- payment_point: msg.payment_point,
- delayed_payment_basepoint: msg.delayed_payment_basepoint,
- htlc_basepoint: msg.htlc_basepoint
+ // Check that the remote can afford to pay for this HTLC on-chain at the current
+ // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
+ let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
+ };
+ if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
+ return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
};
- self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
- selected_contest_delay: msg.to_self_delay,
- pubkeys: counterparty_pubkeys,
- });
+ if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
+ return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
+ }
- self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
- self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+ if !self.context.is_outbound() {
+ // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+ // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
+ // receiver's side, only on the sender's.
+ // Note that when we eventually remove support for fee updates and switch to anchor output
+ // fees, we will drop the `2 *`, since we will no longer be as sensitive to fee spikes. But, keep
+ // the extra htlc when calculating the next remote commitment transaction fee as we should
+ // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
+ // sensitive to fee spikes.
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
+ if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
+ // Note that if the pending_forward_status is not updated here, then it's because we're already failing
+ // the HTLC, i.e. its status is already set to failing.
+ log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
+ pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ }
+ } else {
+ // Check that they won't violate our local required channel reserve by adding this HTLC.
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
+ return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
+ }
+ }
+ if self.context.next_counterparty_htlc_id != msg.htlc_id {
+ return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
+ }
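+ // nLockTime values of 500,000,000 or more are interpreted by Bitcoin as UNIX timestamps
+ // rather than block heights, which we never use for HTLC expiries.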
+ if msg.cltv_expiry >= 500000000 {
+ return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
+ }
- self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
- self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+ if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
+ if let PendingHTLCStatus::Forward(_) = pending_forward_status {
+ panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
+ }
+ }
- Ok(())
+ // Now update local state:
+ self.context.next_counterparty_htlc_id += 1;
+ self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
+ htlc_id: msg.htlc_id,
+ amount_msat: msg.amount_msat,
+ payment_hash: msg.payment_hash,
+ cltv_expiry: msg.cltv_expiry,
+ state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
+ });
+ Ok(())
}
- fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
- let funding_script = self.context.get_funding_redeemscript();
+ /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message as removed, returning a reference to it.
+ #[inline]
+ fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
+ assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if htlc.htlc_id == htlc_id {
+ let outcome = match check_preimage {
+ None => fail_reason.into(),
+ Some(payment_preimage) => {
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+ if payment_hash != htlc.payment_hash {
+ return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
+ }
+ OutboundHTLCOutcome::Success(Some(payment_preimage))
+ }
+ };
+ match htlc.state {
+ OutboundHTLCState::LocalAnnounced(_) =>
+ return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
+ OutboundHTLCState::Committed => {
+ htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
+ },
+ OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
+ return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+ }
+ return Ok(htlc);
+ }
+ }
+ Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
+ }
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
- {
- let trusted_tx = initial_commitment_tx.trust();
- let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
- let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
- // They sign the holder commitment transaction...
- log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
- log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
- encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
- encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
- secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
+ pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
}
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+ }
- let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
- let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
- log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
- log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+ }
- let counterparty_signature = self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
+ self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
+ Ok(())
+ }
- // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
- Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
+ pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+ }
+
+ self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
+ Ok(())
}
- pub fn funding_created<SP: Deref, L: Deref>(
- &mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
- where
- SP::Target: SignerProvider<Signer = Signer>,
- L::Target: Logger
+ pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
+ where L::Target: Logger
{
- if self.context.is_outbound() {
- return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
- }
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
- // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
- // remember the channel, so it's safe to just send an error_message here and drop the
- // channel.
- return Err(ChannelError::Close("Received funding_created after we got the channel!".to_owned()));
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
}
- if self.context.inbound_awaiting_accept {
- return Err(ChannelError::Close("FundingCreated message received before the channel was accepted".to_owned()));
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
}
- if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
- self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
}
- let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
- self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
- // This is an externally observable change before we finish all our checks. In particular
- // funding_created_signature may fail.
- self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ let funding_script = self.context.get_funding_redeemscript();
- let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
- Ok(res) => res,
- Err(ChannelError::Close(e)) => {
- self.context.channel_transaction_parameters.funding_outpoint = None;
- return Err(ChannelError::Close(e));
- },
- Err(e) => {
- // The only error we know how to handle is ChannelError::Close, so we fall over here
- // to make sure we don't continue with an inconsistent state.
- panic!("unexpected error type from funding_created_signature {:?}", e);
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
+ let commitment_txid = {
+ let trusted_tx = commitment_stats.tx.trust();
+ let bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+
+ log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
+ log_bytes!(msg.signature.serialize_compact()[..]),
+ log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
+ log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
+ return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
}
+ bitcoin_tx.txid
};
+ let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
+
+ // If our counterparty updated the channel fee in this commitment transaction, check that
+ // they can actually afford the new fee now.
+ let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
+ update_state == FeeUpdateState::RemoteAnnounced
+ } else { false };
+ if update_fee {
+ debug_assert!(!self.context.is_outbound());
+ let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
+ if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
+ return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
+ }
+ }
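+ // In test and fuzzing builds, cross-check the fee projected by our fee-estimation helpers
+ // against the fee in the commitment transaction we just built (when the HTLC set and feerate
+ // still match the projection).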
+ #[cfg(any(test, fuzzing))]
+ {
+ if self.context.is_outbound() {
+ let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
+ *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ if let Some(info) = projected_commit_tx_info {
+ let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
+ + self.context.holding_cell_htlc_updates.len();
+ if info.total_pending_htlcs == total_pending_htlcs
+ && info.next_holder_htlc_id == self.context.next_holder_htlc_id
+ && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
+ && info.feerate == self.context.feerate_per_kw {
+ assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
+ }
+ }
+ }
+ }
+
+ if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
+ return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
+ }
+
+ // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
+ // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
+ // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
+ // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
+ // backwards compatibility, we never use it in production. To provide test coverage, here,
+ // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
+ #[allow(unused_assignments, unused_mut)]
+ let mut separate_nondust_htlc_sources = false;
+ #[cfg(all(feature = "std", any(test, fuzzing)))] {
+ use core::hash::{BuildHasher, Hasher};
+ // Get a random value using the only std API to do so - the DefaultHasher
+ let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
+ separate_nondust_htlc_sources = rand_val % 2 == 0;
+ }
+
+ let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
+ let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
+ for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
+ if let Some(_) = htlc.transaction_output_index {
+ let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
+ self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
+ false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+
+ let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
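+ // With anchor outputs, HTLC transactions are signed SINGLE|ANYONECANPAY so that fees can
+ // later be attached by adding inputs and outputs.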
+ let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
+ let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
+ log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
+ log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
+ encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
+ return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
+ }
+ if !separate_nondust_htlc_sources {
+ htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
+ }
+ } else {
+ htlcs_and_sigs.push((htlc, None, source_opt.take()));
+ }
+ if separate_nondust_htlc_sources {
+ if let Some(source) = source_opt.take() {
+ nondust_htlc_sources.push(source);
+ }
+ }
+ debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
+ }
let holder_commitment_tx = HolderCommitmentTransaction::new(
- initial_commitment_tx,
+ commitment_stats.tx,
msg.signature,
- Vec::new(),
+ msg.htlc_signatures.clone(),
&self.context.get_holder_pubkeys().funding_pubkey,
self.context.counterparty_funding_pubkey()
);
- self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
+ self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
- // Now that we're past error-generating stuff, update our local state:
+ // Update state now that we've passed all the can-fail calls...
+ let mut need_commitment = false;
+ if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
+ if *update_state == FeeUpdateState::RemoteAnnounced {
+ *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
+ need_commitment = true;
+ }
+ }
- let funding_redeemscript = self.context.get_funding_redeemscript();
- let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
- let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
- let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
- monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
- let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
- shutdown_script, self.context.get_holder_selected_contest_delay(),
- &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
- &self.context.channel_transaction_parameters,
- funding_redeemscript.clone(), self.context.channel_value_satoshis,
- obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
+ for htlc in self.context.pending_inbound_htlcs.iter_mut() {
+ let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
+ Some(forward_info.clone())
+ } else { None };
+ if let Some(forward_info) = new_forward {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+ htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
+ need_commitment = true;
+ }
+ }
+ let mut claimed_htlcs = Vec::new();
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
+ log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
+ // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
+ // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
+ // have a `Success(None)` reason. In this case we could forget some HTLC
+ // claims, but such an upgrade is unlikely and including claimed HTLCs here
+ // fixes a bug which the user was exposed to on 0.0.104 when they started the
+ // claim anyway.
+ claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+ }
+ htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
+ need_commitment = true;
+ }
+ }
- channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+ commitment_tx: holder_commitment_tx,
+ htlc_outputs: htlcs_and_sigs,
+ claimed_htlcs,
+ nondust_htlc_sources,
+ }]
+ };
- self.context.channel_state = ChannelState::FundingSent as u32;
- self.context.channel_id = funding_txo.to_channel_id();
- self.context.cur_counterparty_commitment_transaction_number -= 1;
self.context.cur_holder_commitment_transaction_number -= 1;
+ // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
+ // build_commitment_no_status_check() next which will reset this to RAAFirst.
+ self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
- log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.context.channel_id()));
+ if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ // In case we initially failed monitor updating without requiring a response, we need
+ // to make sure the RAA gets sent first.
+ self.context.monitor_pending_revoke_and_ack = true;
+ if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we were going to send a commitment_signed after the RAA, go ahead and do all
+ // the corresponding HTLC status updates so that get_last_commitment_update
+ // includes the right HTLCs.
+ self.context.monitor_pending_commitment_signed = true;
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ }
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
+ log_bytes!(self.context.channel_id));
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
+ }
- let need_channel_ready = self.check_get_channel_ready(0).is_some();
- self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+ let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+ // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
+ // we'll send one right away when we get the revoke_and_ack when we
+ // free_holding_cell_htlcs().
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+ true
+ } else { false };
- Ok((msgs::FundingSigned {
- channel_id: self.context.channel_id,
- signature,
- #[cfg(taproot)]
- partial_signature_with_nonce: None,
- }, channel_monitor))
+ log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
+ log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+ self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+ return Ok(self.push_ret_blockable_mon_update(monitor_update));
}
- /// Handles a funding_signed message from the remote end.
- /// If this call is successful, broadcast the funding transaction (and not before!)
- pub fn funding_signed<SP: Deref, L: Deref>(
- &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
- ) -> Result<ChannelMonitor<Signer>, ChannelError>
- where
- SP::Target: SignerProvider<Signer = Signer>,
- L::Target: Logger
- {
- if !self.context.is_outbound() {
- return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
- }
- if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
- return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
- }
- if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
- self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
- }
-
- let funding_script = self.context.get_funding_redeemscript();
+ /// Public version of the below, checking relevant preconditions first.
+ /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
+ /// returns `(None, Vec::new())`.
+ pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
+ (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
+ self.free_holding_cell_htlcs(logger)
+ } else { (None, Vec::new()) }
+ }
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
- let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+ /// Frees any pending commitment updates in the holding cell, generating the relevant messages
+ /// for our counterparty.
+ fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
+ assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
+ if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
+ log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
+ if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
- log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
- log_bytes!(self.context.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
+ updates: Vec::new(),
+ };
- let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
- {
- let trusted_tx = initial_commitment_tx.trust();
- let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
- let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
- // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
- return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
+ let mut htlc_updates = Vec::new();
+ mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
+ let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
+ let mut htlcs_to_fail = Vec::new();
+ for htlc_update in htlc_updates.drain(..) {
+ // Note that this *can* fail, though it should be due to rather-rare conditions on
+ // fee races with adding too many outputs which push our total payments just over
+ // the limit. In case it's less rare than I anticipate, we may want to revisit
+ // handling this case better and maybe fulfilling some of the HTLCs while attempting
+ // to rebalance channels.
+ match &htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
+ match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
+ Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
+ Err(e) => {
+ match e {
+ ChannelError::Ignore(ref msg) => {
+ log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
+ log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
+ // If we fail to send here, then this HTLC should
+ // be failed backwards. Failing to send here
+ // indicates that this HTLC may keep being put back
+ // into the holding cell without ever being
+ // successfully forwarded/failed/fulfilled, causing
+ // our counterparty to eventually close on us.
+ htlcs_to_fail.push((source.clone(), *payment_hash));
+ },
+ _ => {
+ panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
+ },
+ }
+ }
+ }
+ },
+ &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
+ // If an HTLC claim was previously added to the holding cell (via
+ // `get_update_fulfill_htlc`), then generating the claim message itself must
+ // not fail - any in between attempts to claim the HTLC will have resulted
+ // in it hitting the holding cell again and we cannot change the state of a
+ // holding cell HTLC from fulfill to anything else.
+ let (update_fulfill_msg_option, mut additional_monitor_update) =
+ if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
+ (msg, monitor_update)
+ } else { unreachable!() };
+ update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
+ monitor_update.updates.append(&mut additional_monitor_update.updates);
+ },
+ &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
+ match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
+ Ok(update_fail_msg_option) => {
+ // If an HTLC failure was previously added to the holding cell (via
+ // `queue_fail_htlc`) then generating the fail message itself must
+ // not fail - we should never end up in a state where we double-fail
+ // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
+ // for a full revocation before failing.
+ update_fail_htlcs.push(update_fail_msg_option.unwrap())
+ },
+ Err(e) => {
+ if let ChannelError::Ignore(_) = e {}
+ else {
+ panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
+ }
+ }
+ }
+ },
+ }
}
- }
+ if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
+ return (None, htlcs_to_fail);
+ }
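+ // Also release any fee update queued in the holding cell; with `force_holding_cell` set
+ // to false this either produces the update_fee message now or drops the update if we can
+ // no longer afford the new feerate.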
+ let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
+ self.send_update_fee(feerate, false, logger)
+ } else {
+ None
+ };
- let holder_commitment_tx = HolderCommitmentTransaction::new(
- initial_commitment_tx,
- msg.signature,
- Vec::new(),
- &self.context.get_holder_pubkeys().funding_pubkey,
- self.context.counterparty_funding_pubkey()
- );
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
+ // but we want them to be strictly increasing by one, so reset it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
- self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, Vec::new())
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+ log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
+ log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
+ } else {
+ (None, Vec::new())
+ }
+ }
- let funding_redeemscript = self.context.get_funding_redeemscript();
- let funding_txo = self.context.get_funding_txo().unwrap();
- let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
- let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
- let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
- monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
- let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
- shutdown_script, self.context.get_holder_selected_contest_delay(),
- &self.context.destination_script, (funding_txo, funding_txo_script),
- &self.context.channel_transaction_parameters,
- funding_redeemscript.clone(), self.context.channel_value_satoshis,
- obscure_factor,
- holder_commitment_tx, best_block, self.context.counterparty_node_id);
-
- channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, self.context.counterparty_cur_commitment_point.unwrap(), logger);
-
- assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
- self.context.channel_state = ChannelState::FundingSent as u32;
- self.context.cur_holder_commitment_transaction_number -= 1;
- self.context.cur_counterparty_commitment_transaction_number -= 1;
-
- log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.context.channel_id()));
-
- let need_channel_ready = self.check_get_channel_ready(0).is_some();
- self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
- Ok(channel_monitor)
- }
-
- /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
- /// and the channel is now usable (and public), this may generate an announcement_signatures to
- /// reply with.
- pub fn channel_ready<NS: Deref, L: Deref>(
- &mut self, msg: &msgs::ChannelReady, node_signer: &NS, genesis_block_hash: BlockHash,
- user_config: &UserConfig, best_block: &BestBlock, logger: &L
- ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
- where
- NS::Target: NodeSigner,
- L::Target: Logger
+ /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
+ /// commitment_signed message here in case we had pending outbound HTLCs to add which were
+ /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
+ /// generating an appropriate error *after* the channel state has been updated based on the
+ /// revoke_and_ack message.
+ pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
+ where L::Target: Logger,
{
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+ }
if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- self.context.workaround_lnd_bug_4006 = Some(msg.clone());
- return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
+ return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
}
-
- if let Some(scid_alias) = msg.short_channel_id_alias {
- if Some(scid_alias) != self.context.short_channel_id {
- // The scid alias provided can be used to route payments *from* our counterparty,
- // i.e. can be used for inbound payments and provided in invoices, but is not used
- // when routing outbound payments.
- self.context.latest_inbound_scid_alias = Some(scid_alias);
- }
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
+ return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
}
- let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
+ let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
- if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.context.channel_state |= ChannelState::TheirChannelReady as u32;
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
- self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
- self.context.update_time_counter += 1;
- } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
- // If we reconnected before sending our `channel_ready` they may still resend theirs:
- (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
- (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
- {
- // They probably disconnected/reconnected and re-sent the channel_ready, which is
- // required, or they're sending a fresh SCID alias.
- let expected_point =
- if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
- // If they haven't ever sent an updated point, the point they send should match
- // the current one.
- self.context.counterparty_cur_commitment_point
- } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
- // If we've advanced the commitment number once, the second commitment point is
- // at `counterparty_prev_commitment_point`, which is not yet revoked.
- debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
- self.context.counterparty_prev_commitment_point
- } else {
- // If they have sent updated points, channel_ready is always supposed to match
- // their "first" point, which we re-derive here.
- Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
- &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
- ).expect("We already advanced, so previous secret keys should have been validated already")))
- };
- if expected_point != Some(msg.next_per_commitment_point) {
- return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
+ if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
+ if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
+ return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
}
- return Ok(None);
- } else {
- return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
}
- self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
- self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
-
- log_info!(logger, "Received channel_ready from peer for channel {}", log_bytes!(self.context.channel_id()));
-
- Ok(self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger))
- }
-
- pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
- where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus, L::Target: Logger {
- // We can't accept HTLCs sent after we've sent a shutdown.
- let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if local_sent_shutdown {
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
- }
- // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
- let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
- if remote_sent_shutdown {
- return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
- }
- if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
- return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
- }
- if msg.amount_msat == 0 {
- return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
- }
- if msg.amount_msat < self.context.holder_htlc_minimum_msat {
- return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
+ if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
+ // Our counterparty seems to have burned their coins to us (by revoking a state when we
+ // haven't given them a new commitment transaction to broadcast). We should probably
+ // take advantage of this by updating our channel monitor, sending them an error, and
+ // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
+ // lot of work, and there's some chance this is all a misunderstanding anyway.
+ // We have to do *something*, though, since our signer may get mad at us for otherwise
+ // jumping a remote commitment number, so best to just force-close and move on.
+ return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
}
- let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
- let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
- if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
- return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
- }
- if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
- return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
- }
- // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
- // the reserve_satoshis we told them to always have as direct payment so that they lose
- // something if we punish them for broadcasting an old state).
- // Note that we don't really care about having a small/no to_remote output in our local
- // commitment transactions, as the purpose of the channel reserve is to ensure we can
- // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
- // present in the next commitment transaction we send them (at least for fulfilled ones,
- // failed ones won't modify value_to_self).
- // Note that we will send HTLCs which another instance of rust-lightning would think
- // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
- // Channel state once they will not be present in the next received commitment
- // transaction).
- let mut removed_outbound_total_msat = 0;
- for ref htlc in self.context.pending_outbound_htlcs.iter() {
- if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
- removed_outbound_total_msat += htlc.amount_msat;
- } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
- removed_outbound_total_msat += htlc.amount_msat;
- }
+ #[cfg(any(test, fuzzing))]
+ {
+ *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
}
- let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.opt_anchors() {
- (0, 0)
- } else {
- let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
- (dust_buffer_feerate * htlc_timeout_tx_weight(false) / 1000,
- dust_buffer_feerate * htlc_success_tx_weight(false) / 1000)
+ self.context.holder_signer.validate_counterparty_revocation(
+ self.context.cur_counterparty_commitment_transaction_number + 1,
+ &secret
+ ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
+
+ self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
+ self.context.latest_monitor_update_id += 1;
+ let mut monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
+ idx: self.context.cur_counterparty_commitment_transaction_number + 1,
+ secret: msg.per_commitment_secret,
+ }],
};
- let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
- if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
- let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
- if on_counterparty_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
- log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
- on_counterparty_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
- }
- let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
- if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
- let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
- if on_holder_tx_dust_htlc_exposure_msat > self.context.get_max_dust_htlc_exposure_msat() {
- log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
- on_holder_tx_dust_htlc_exposure_msat, self.context.get_max_dust_htlc_exposure_msat());
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
- }
- }
+ // Update state now that we've passed all the can-fail calls...
+ // (note that we may still fail to generate the new commitment_signed message, but that's
+ // OK, we step the channel here and *then* if the new generation fails we can fail the
+ // channel based on that, but stepping stuff here should be safe either way.)
+ self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
+ self.context.sent_message_awaiting_response = None;
+ self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
+ self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
+ self.context.cur_counterparty_commitment_transaction_number -= 1;
- let pending_value_to_self_msat =
- self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
- let pending_remote_value_msat =
- self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
- if pending_remote_value_msat < msg.amount_msat {
- return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
+ if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
- // Check that the remote can afford to pay for this HTLC on-chain at the current
- // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
- let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
- };
- if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
- return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
- };
+ log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
+ let mut to_forward_infos = Vec::new();
+ let mut revoked_htlcs = Vec::new();
+ let mut finalized_claimed_htlcs = Vec::new();
+ let mut update_fail_htlcs = Vec::new();
+ let mut update_fail_malformed_htlcs = Vec::new();
+ let mut require_commitment = false;
+ let mut value_to_self_msat_diff: i64 = 0;
- if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < self.context.holder_selected_channel_reserve_satoshis * 1000 {
- return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
- }
+ {
+ // Take references explicitly so that we can hold multiple references to self.context.
+ let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
+ let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
- if !self.context.is_outbound() {
- // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
- // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
- // receiver's side, only on the sender's.
- // Note that when we eventually remove support for fee updates and switch to anchor output
- // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
- // the extra htlc when calculating the next remote commitment transaction fee as we should
- // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
- // sensitive to fee spikes.
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
- if pending_remote_value_msat - msg.amount_msat - self.context.holder_selected_channel_reserve_satoshis * 1000 < remote_fee_cost_incl_stuck_buffer_msat {
- // Note that if the pending_forward_status is not updated here, then it's because we're already failing
- // the HTLC, i.e. its status is already set to failing.
- log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", log_bytes!(self.context.channel_id()));
- pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
+ // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
+ pending_inbound_htlcs.retain(|htlc| {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
+ if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
+ value_to_self_msat_diff += htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
+ pending_outbound_htlcs.retain(|htlc| {
+ if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
+ log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
+ if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
+ revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
+ } else {
+ finalized_claimed_htlcs.push(htlc.source.clone());
+ // They fulfilled, so we sent them money
+ value_to_self_msat_diff -= htlc.amount_msat as i64;
+ }
+ false
+ } else { true }
+ });
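+ // Advance the remaining inbound HTLCs one state: AwaitingRemoteRevokeToAnnounce becomes
+ // AwaitingAnnouncedRemoteRevoke, and AwaitingAnnouncedRemoteRevoke either becomes
+ // Committed (queued for forwarding) or is failed back per its PendingHTLCStatus.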
+ for htlc in pending_inbound_htlcs.iter_mut() {
+ let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
+ true
+ } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
+ true
+ } else { false };
+ if swap {
+ let mut state = InboundHTLCState::Committed;
+ mem::swap(&mut state, &mut htlc.state);
+
+ if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
+ require_commitment = true;
+ } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
+ match forward_info {
+ PendingHTLCStatus::Fail(fail_msg) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
+ require_commitment = true;
+ match fail_msg {
+ HTLCFailureMsg::Relay(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
+ update_fail_htlcs.push(msg)
+ },
+ HTLCFailureMsg::Malformed(msg) => {
+ htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
+ update_fail_malformed_htlcs.push(msg)
+ },
+ }
+ },
+ PendingHTLCStatus::Forward(forward_info) => {
+ log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
+ to_forward_infos.push((forward_info, htlc.htlc_id));
+ htlc.state = InboundHTLCState::Committed;
+ }
+ }
+ }
+ }
}
- } else {
- // Check that they won't violate our local required channel reserve by adding this HTLC.
- let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
- let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
- if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat {
- return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
+ for htlc in pending_outbound_htlcs.iter_mut() {
+ if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+ log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
+ htlc.state = OutboundHTLCState::Committed;
+ }
+ if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+ require_commitment = true;
+ }
}
}
- if self.context.next_counterparty_htlc_id != msg.htlc_id {
- return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
- }
- if msg.cltv_expiry >= 500000000 {
- return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
+ self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
+
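+ // A pending fee update that was only waiting on this revocation is now locked in and
+ // applied to our feerate.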
+ if let Some((feerate, update_state)) = self.context.pending_update_fee {
+ match update_state {
+ FeeUpdateState::Outbound => {
+ debug_assert!(self.context.is_outbound());
+ log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ },
+ FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
+ FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
+ debug_assert!(!self.context.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+ require_commitment = true;
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ },
+ }
}
- if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
- if let PendingHTLCStatus::Forward(_) = pending_forward_status {
- panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
+ if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
+ // We can't actually generate a new commitment transaction (including by freeing holding
+ // cells) while we can't update the monitor, so we just return what we have.
+ if require_commitment {
+ self.context.monitor_pending_commitment_signed = true;
+ // When the monitor updating is restored we'll call get_last_commitment_update(),
+ // which does not update state, but we're definitely now awaiting a remote revoke
+ // before we can step forward any more, so set it here.
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
}
+ self.context.monitor_pending_forwards.append(&mut to_forward_infos);
+ self.context.monitor_pending_failures.append(&mut revoked_htlcs);
+ self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
+ return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
}
- // Now update local state:
- self.context.next_counterparty_htlc_id += 1;
- self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
- htlc_id: msg.htlc_id,
- amount_msat: msg.amount_msat,
- payment_hash: msg.payment_hash,
- cltv_expiry: msg.cltv_expiry,
- state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
- });
- Ok(())
- }
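+ // We're no longer AwaitingRemoteRevoke, so try to flush the holding cell. Any monitor
+ // update it produces is folded into ours so monitor update ids stay strictly increasing.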
+ match self.free_holding_cell_htlcs(logger) {
+ (Some(_), htlcs_to_fail) => {
+ let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
+ // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
- /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
- #[inline]
- fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
- assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if htlc.htlc_id == htlc_id {
- let outcome = match check_preimage {
- None => fail_reason.into(),
- Some(payment_preimage) => {
- let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
- if payment_hash != htlc.payment_hash {
- return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
- }
- OutboundHTLCOutcome::Success(Some(payment_preimage))
- }
- };
- match htlc.state {
- OutboundHTLCState::LocalAnnounced(_) =>
- return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
- OutboundHTLCState::Committed => {
- htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
- },
- OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
- return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+ },
+ (None, htlcs_to_fail) => {
+ if require_commitment {
+ let mut additional_update = self.build_commitment_no_status_check(logger);
+
+ // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
+ // strictly increasing by one, so decrement it here.
+ self.context.latest_monitor_update_id = monitor_update.update_id;
+ monitor_update.updates.append(&mut additional_update.updates);
+
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
+ log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
+ self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
+ } else {
+ log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
+ self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+ Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
}
- return Ok(htlc);
}
}
- Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
}
- pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
- }
-
- self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
+ /// Queues up an outbound update fee by placing it in the holding cell. You should call
+ /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+ /// commitment update.
+ pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
+ let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
+ assert!(msg_opt.is_none(), "We forced holding cell?");
}
- pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
+ /// Adds a pending fee update to this channel. See the doc for [`Self::send_htlc`] for
+ /// further details on when the return value may be `None`.
+ /// If our balance is too low to cover the cost of the next commitment transaction at the
+ /// new feerate, the update is cancelled.
+ ///
+ /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
+ /// [`Channel`] if `force_holding_cell` is false.
+ fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
+ if !self.context.is_outbound() {
+ panic!("Cannot send fee from inbound channel");
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
+ if !self.context.is_usable() {
+ panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
}
-
- self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
- Ok(())
- }
-
- pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
+ if !self.context.is_live() {
+ panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
}
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
+
+ // Before proposing a feerate update, check that we can actually afford the new fee.
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
+ let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
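+ // Budget the fee at the new rate for a commitment that includes all current non-dust
+ // HTLCs, everything in the holding cell, and a buffer of additional inbound HTLCs the
+ // counterparty may add concurrently.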
+ let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000;
+ let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
+ if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
+ //TODO: auto-close after a number of failures?
+ log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
+ return None;
}
- self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
- Ok(())
- }
-
- pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError>
- where L::Target: Logger
- {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
+ // Note that we evaluate each pending HTLC's "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ return None;
}
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
- return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
+ if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
+ return None;
}
- let funding_script = self.context.get_funding_redeemscript();
+ if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
+ force_holding_cell = true;
+ }
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
+ if force_holding_cell {
+ self.context.holding_cell_update_fee = Some(feerate_per_kw);
+ return None;
+ }
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
- let commitment_txid = {
- let trusted_tx = commitment_stats.tx.trust();
- let bitcoin_tx = trusted_tx.built_transaction();
- let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
+ debug_assert!(self.context.pending_update_fee.is_none());
+ self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
- log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
- log_bytes!(msg.signature.serialize_compact()[..]),
- log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
- log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.context.channel_id()));
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
- return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
- }
- bitcoin_tx.txid
- };
- let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
+ Some(msgs::UpdateFee {
+ channel_id: self.context.channel_id,
+ feerate_per_kw,
+ })
+ }
- // If our counterparty updated the channel fee in this commitment transaction, check that
- // they can actually afford the new fee now.
- let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
- update_state == FeeUpdateState::RemoteAnnounced
- } else { false };
- if update_fee {
- debug_assert!(!self.context.is_outbound());
- let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
- if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
- return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
- }
- }
- #[cfg(any(test, fuzzing))]
- {
- if self.context.is_outbound() {
- let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
- *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
- if let Some(info) = projected_commit_tx_info {
- let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
- + self.context.holding_cell_htlc_updates.len();
- if info.total_pending_htlcs == total_pending_htlcs
- && info.next_holder_htlc_id == self.context.next_holder_htlc_id
- && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
- && info.feerate == self.context.feerate_per_kw {
- assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
- }
- }
- }
+ /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
+ /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
+ /// resent.
+ /// No further message handling calls may be made until a channel_reestablish dance has
+ /// completed.
+ pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ return;
}
- if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
- return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
+ // While the below code should be idempotent, it's simpler to just return early, as
+ // redundant disconnect events can fire, though they should be rare.
+ return;
}
- // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
- // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
- // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
- // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
- // backwards compatibility, we never use it in production. To provide test coverage, here,
- // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
- #[allow(unused_assignments, unused_mut)]
- let mut separate_nondust_htlc_sources = false;
- #[cfg(all(feature = "std", any(test, fuzzing)))] {
- use core::hash::{BuildHasher, Hasher};
- // Get a random value using the only std API to do so - the DefaultHasher
- let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
- separate_nondust_htlc_sources = rand_val % 2 == 0;
+ if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
+ self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
}
- let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
- let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
- for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
- if let Some(_) = htlc.transaction_output_index {
- let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
- self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.context.opt_anchors(),
- false, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
+ // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
+ // will be retransmitted.
+ self.context.last_sent_closing_fee = None;
+ self.context.pending_counterparty_closing_signed = None;
+ self.context.closing_fee_limits = None;
- let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &keys);
- let htlc_sighashtype = if self.context.opt_anchors() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
- let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
- log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
- log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
- encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.context.channel_id()));
- if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
- return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
- }
- if !separate_nondust_htlc_sources {
- htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
- }
- } else {
- htlcs_and_sigs.push((htlc, None, source_opt.take()));
- }
- if separate_nondust_htlc_sources {
- if let Some(source) = source_opt.take() {
- nondust_htlc_sources.push(source);
- }
+ let mut inbound_drop_count = 0;
+ self.context.pending_inbound_htlcs.retain(|htlc| {
+ match htlc.state {
+ InboundHTLCState::RemoteAnnounced(_) => {
+ // They sent us an update_add_htlc but we never got the commitment_signed.
+ // We'll tell them what commitment_signed we're expecting next and they'll drop
+ // this HTLC accordingly
+ inbound_drop_count += 1;
+ false
+ },
+ InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
+ // We received a commitment_signed updating this HTLC and (at least hopefully)
+ // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
+ // in response to it yet, so don't touch it.
+ true
+ },
+ InboundHTLCState::Committed => true,
+ InboundHTLCState::LocalRemoved(_) => {
+ // We (hopefully) sent a commitment_signed updating this HTLC (which we can
+ // re-transmit if needed) and they may have even sent a revoke_and_ack back
+ // (that we missed). Keep this around for now and if they tell us they missed
+ // the commitment_signed we can re-transmit the update then.
+ true
+ },
}
- debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
- }
-
- let holder_commitment_tx = HolderCommitmentTransaction::new(
- commitment_stats.tx,
- msg.signature,
- msg.htlc_signatures.clone(),
- &self.context.get_holder_pubkeys().funding_pubkey,
- self.context.counterparty_funding_pubkey()
- );
-
- self.context.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
- .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
+ });
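+ // The peer will re-send the dropped update_add_htlcs with the same IDs after reconnecting,
+ // so wind back our expected next counterparty HTLC ID accordingly.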
+ self.context.next_counterparty_htlc_id -= inbound_drop_count;
- // Update state now that we've passed all the can-fail calls...
- let mut need_commitment = false;
- if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
- if *update_state == FeeUpdateState::RemoteAnnounced {
- *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
- need_commitment = true;
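+ // A fee update the peer announced but never committed is forgotten here; if they still
+ // want the new feerate they will re-send update_fee after reconnecting.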
+ if let Some((_, update_state)) = self.context.pending_update_fee {
+ if update_state == FeeUpdateState::RemoteAnnounced {
+ debug_assert!(!self.context.is_outbound());
+ self.context.pending_update_fee = None;
}
}
- for htlc in self.context.pending_inbound_htlcs.iter_mut() {
- let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
- Some(forward_info.clone())
- } else { None };
- if let Some(forward_info) = new_forward {
- log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
- log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
- htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
- need_commitment = true;
- }
- }
- let mut claimed_htlcs = Vec::new();
for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
- log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
- log_bytes!(htlc.payment_hash.0), log_bytes!(self.context.channel_id));
- // Grab the preimage, if it exists, instead of cloning
- let mut reason = OutboundHTLCOutcome::Success(None);
- mem::swap(outcome, &mut reason);
- if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
- // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
- // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
- // have a `Success(None)` reason. In this case we could forget some HTLC
- // claims, but such an upgrade is unlikely and including claimed HTLCs here
- // fixes a bug which the user was exposed to on 0.0.104 when they started the
- // claim anyway.
- claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
- }
- htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
- need_commitment = true;
+ if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
+ // They sent us an update to remove this but haven't yet sent the corresponding
+ // commitment_signed, so we move it back to Committed; they can re-send the update
+ // upon reconnection.
+ htlc.state = OutboundHTLCState::Committed;
}
}
- self.context.latest_monitor_update_id += 1;
- let mut monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
- commitment_tx: holder_commitment_tx,
- htlc_outputs: htlcs_and_sigs,
- claimed_htlcs,
- nondust_htlc_sources,
- }]
- };
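+ // We are no longer awaiting a response to any message; response-tracking restarts once we
+ // send messages again after reconnection.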
+ self.context.sent_message_awaiting_response = None;
- self.context.cur_holder_commitment_transaction_number -= 1;
- // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
- // build_commitment_no_status_check() next which will reset this to RAAFirst.
- self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
+ self.context.channel_state |= ChannelState::PeerDisconnected as u32;
+ log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
+ }
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
- // In case we initially failed monitor updating without requiring a response, we need
- // to make sure the RAA gets sent first.
- self.context.monitor_pending_revoke_and_ack = true;
- if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
- // If we were going to send a commitment_signed after the RAA, go ahead and do all
- // the corresponding HTLC status updates so that get_last_commitment_update
- // includes the right HTLCs.
- self.context.monitor_pending_commitment_signed = true;
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- }
- log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
- log_bytes!(self.context.channel_id));
- return Ok(self.push_ret_blockable_mon_update(monitor_update));
- }
+ /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
+ /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+ /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+ /// update completes (potentially immediately).
+ /// The messages which were generated with the monitor update must *not* have been sent to the
+ /// remote end, and must instead have been dropped. They will be regenerated when
+ /// [`Self::monitor_updating_restored`] is called.
+ ///
+ /// [`ChannelManager`]: super::channelmanager::ChannelManager
+ /// [`chain::Watch`]: crate::chain::Watch
+ /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
+ fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+ resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
+ mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
+ mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
+ ) {
+ self.context.monitor_pending_revoke_and_ack |= resend_raa;
+ self.context.monitor_pending_commitment_signed |= resend_commitment;
+ self.context.monitor_pending_channel_ready |= resend_channel_ready;
+ self.context.monitor_pending_forwards.append(&mut pending_forwards);
+ self.context.monitor_pending_failures.append(&mut pending_fails);
+ self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
+ self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
+ }
- let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
- // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
- // we'll send one right away when we get the revoke_and_ack when we
- // free_holding_cell_htlcs().
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- true
- } else { false };
+ /// Indicates that the latest ChannelMonitor update has been committed by the client
+ /// successfully and we should restore normal operation. Returns messages which should be sent
+ /// to the remote side.
+ pub fn monitor_updating_restored<L: Deref, NS: Deref>(
+ &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
+ user_config: &UserConfig, best_block_height: u32
+ ) -> MonitorRestoreUpdates
+ where
+ L::Target: Logger,
+ NS::Target: NodeSigner
+ {
+ assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
+ self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
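+ // Drop any now-completed (unblocked) monitor updates from the pending queue, keeping only
+ // those which are still blocked.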
+ let mut found_blocked = false;
+ self.context.pending_monitor_updates.retain(|upd| {
+ if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
+ if upd.blocked { found_blocked = true; }
+ upd.blocked
+ });
- log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
- log_bytes!(self.context.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
- self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
- return Ok(self.push_ret_blockable_mon_update(monitor_update));
- }
+ // If we're past (or at) the FundingSent stage on an outbound channel, try to
+ // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
+ // first received the funding_signed.
+ let mut funding_broadcastable =
+ if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
+ self.context.funding_transaction.take()
+ } else { None };
+ // That said, if the funding transaction is already confirmed (ie we're active with a
+ // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
+ if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
+ funding_broadcastable = None;
+ }
- /// Public version of the below, checking relevant preconditions first.
- /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
- /// returns `(None, Vec::new())`.
- pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
- if self.context.channel_state >= ChannelState::ChannelReady as u32 &&
- (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
- self.free_holding_cell_htlcs(logger)
- } else { (None, Vec::new()) }
- }
+ // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
+ // (and we assume the user never broadcasts the funding transaction directly, instead
+ // waiting for us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
+ // * an inbound channel that failed to persist the monitor on funding_created and we got
+ // the funding transaction confirmed before the monitor was persisted, or
+ // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
+ let channel_ready = if self.context.monitor_pending_channel_ready {
+ assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
+ "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
+ self.context.monitor_pending_channel_ready = false;
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ })
+ } else { None };
- /// Frees any pending commitment updates in the holding cell, generating the relevant messages
- /// for our counterparty.
- fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
- if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
- log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
- if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, log_bytes!(self.context.channel_id()));
+ let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
- let mut monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
- updates: Vec::new(),
- };
+ let mut accepted_htlcs = Vec::new();
+ mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
+ let mut failed_htlcs = Vec::new();
+ mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
+ let mut finalized_claimed_htlcs = Vec::new();
+ mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
- let mut htlc_updates = Vec::new();
- mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
- let mut update_add_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut update_fulfill_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut update_fail_htlcs = Vec::with_capacity(htlc_updates.len());
- let mut htlcs_to_fail = Vec::new();
- for htlc_update in htlc_updates.drain(..) {
- // Note that this *can* fail, though it should be due to rather-rare conditions on
- // fee races with adding too many outputs which push our total payments just over
- // the limit. In case it's less rare than I anticipate, we may want to revisit
- // handling this case better and maybe fulfilling some of the HTLCs while attempting
- // to rebalance channels.
- match &htlc_update {
- &HTLCUpdateAwaitingACK::AddHTLC {amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet, ..} => {
- match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(), false, logger) {
- Ok(update_add_msg_option) => update_add_htlcs.push(update_add_msg_option.unwrap()),
- Err(e) => {
- match e {
- ChannelError::Ignore(ref msg) => {
- log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}",
- log_bytes!(payment_hash.0), msg, log_bytes!(self.context.channel_id()));
- // If we fail to send here, then this HTLC should
- // be failed backwards. Failing to send here
- // indicates that this HTLC may keep being put back
- // into the holding cell without ever being
- // successfully forwarded/failed/fulfilled, causing
- // our counterparty to eventually close on us.
- htlcs_to_fail.push((source.clone(), *payment_hash));
- },
- _ => {
- panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
- },
- }
- }
- }
- },
- &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
- // If an HTLC claim was previously added to the holding cell (via
- // `get_update_fulfill_htlc`, then generating the claim message itself must
- // not fail - any in between attempts to claim the HTLC will have resulted
- // in it hitting the holding cell again and we cannot change the state of a
- // holding cell HTLC from fulfill to anything else.
- let (update_fulfill_msg_option, mut additional_monitor_update) =
- if let UpdateFulfillFetch::NewClaim { msg, monitor_update, .. } = self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger) {
- (msg, monitor_update)
- } else { unreachable!() };
- update_fulfill_htlcs.push(update_fulfill_msg_option.unwrap());
- monitor_update.updates.append(&mut additional_monitor_update.updates);
- },
- &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
- match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
- Ok(update_fail_msg_option) => {
- // If an HTLC failure was previously added to the holding cell (via
- // `queue_fail_htlc`) then generating the fail message itself must
- // not fail - we should never end up in a state where we double-fail
- // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
- // for a full revocation before failing.
- update_fail_htlcs.push(update_fail_msg_option.unwrap())
- },
- Err(e) => {
- if let ChannelError::Ignore(_) = e {}
- else {
- panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
- }
- }
- }
- },
- }
- }
- if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.context.holding_cell_update_fee.is_none() {
- return (None, htlcs_to_fail);
- }
- let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
- self.send_update_fee(feerate, false, logger)
- } else {
- None
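+ // If the peer disconnected while the monitor update was in flight we can't send them
+ // anything now; clear the resend flags and let channel_reestablish regenerate the
+ // messages once they reconnect.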
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ return MonitorRestoreUpdates {
+ raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
+ accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
};
+ }
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
- // but we want them to be strictly increasing by one, so reset it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
-
- log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
- log_bytes!(self.context.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
- update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());
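+ // Re-generate whichever of the RAA and commitment update the monitor update had been holding back.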
+ let raa = if self.context.monitor_pending_revoke_and_ack {
+ Some(self.get_last_revoke_and_ack())
+ } else { None };
+ let commitment_update = if self.context.monitor_pending_commitment_signed {
+ self.mark_awaiting_response();
+ Some(self.get_last_commitment_update(logger))
+ } else { None };
- self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
- } else {
- (None, Vec::new())
+ self.context.monitor_pending_revoke_and_ack = false;
+ self.context.monitor_pending_commitment_signed = false;
+ let order = self.context.resend_order.clone();
+ log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
+ log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
+ if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
+ match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+ MonitorRestoreUpdates {
+ raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
}
}
- /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
- /// commitment_signed message here in case we had pending outbound HTLCs to add which were
- /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
- /// generating an appropriate error *after* the channel state has been updated based on the
- /// revoke_and_ack message.
- pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
- where L::Target: Logger,
+ pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
{
- if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
+ if self.context.is_outbound() {
+ return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
}
if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
- }
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
- return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
+ return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
}
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
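+ // Compare against the dust buffer feerate *before* recording the new pending update below,
+ // since get_dust_buffer_feerate takes any pending_update_fee into account.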
+ let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
- let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
-
- if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
- if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
- return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
+ self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
+ self.context.update_time_counter += 1;
+ // If the feerate has increased over the previous dust buffer (note that
+ // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
+ // won't be pushed over our dust exposure limit by the feerate increase.
+ if feerate_over_dust_buffer {
+ let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
+ let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
+ let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
+ let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
+ if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
+ msg.feerate_per_kw, holder_tx_dust_exposure)));
+ }
+ if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
+ return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
+ msg.feerate_per_kw, counterparty_tx_dust_exposure)));
}
}
+ Ok(())
+ }
- if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
- // Our counterparty seems to have burned their coins to us (by revoking a state when we
- // haven't given them a new commitment transaction to broadcast). We should probably
- // take advantage of this by updating our channel monitor, sending them an error, and
- // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
- // lot of work, and there's some chance this is all a misunderstanding anyway.
- // We have to do *something*, though, since our signer may get mad at us for otherwise
- // jumping a remote commitment number, so best to just force-close and move on.
- return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
- }
-
- #[cfg(any(test, fuzzing))]
- {
- *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
- *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
- }
-
- self.context.holder_signer.validate_counterparty_revocation(
- self.context.cur_counterparty_commitment_transaction_number + 1,
- &secret
- ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
-
- self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
- .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
- self.context.latest_monitor_update_id += 1;
- let mut monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
- idx: self.context.cur_counterparty_commitment_transaction_number + 1,
- secret: msg.per_commitment_secret,
- }],
- };
-
- // Update state now that we've passed all the can-fail calls...
- // (note that we may still fail to generate the new commitment_signed message, but that's
- // OK, we step the channel here and *then* if the new generation fails we can fail the
- // channel based on that, but stepping stuff here should be safe either way.
- self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
- self.context.sent_message_awaiting_response = None;
- self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
- self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
- self.context.cur_counterparty_commitment_transaction_number -= 1;
-
- if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
- self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
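+ // Commitment numbers count down: cur_holder_commitment_transaction_number already points at
+ // the next commitment to be signed, so the point we hand out is for it, while the secret we
+ // release (two above it) revokes our previous commitment.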
+ let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
+ msgs::RevokeAndACK {
+ channel_id: self.context.channel_id,
+ per_commitment_secret,
+ next_per_commitment_point,
+ #[cfg(taproot)]
+ next_local_nonce: None,
}
+ }
- log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", log_bytes!(self.context.channel_id()));
- let mut to_forward_infos = Vec::new();
- let mut revoked_htlcs = Vec::new();
- let mut finalized_claimed_htlcs = Vec::new();
+ fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
+ let mut update_add_htlcs = Vec::new();
+ let mut update_fulfill_htlcs = Vec::new();
let mut update_fail_htlcs = Vec::new();
let mut update_fail_malformed_htlcs = Vec::new();
- let mut require_commitment = false;
- let mut value_to_self_msat_diff: i64 = 0;
-
- {
- // Take references explicitly so that we can hold multiple references to self.context.
- let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
- let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
-
- // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
- pending_inbound_htlcs.retain(|htlc| {
- if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
- log_trace!(logger, " ...removing inbound LocalRemoved {}", log_bytes!(htlc.payment_hash.0));
- if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
- value_to_self_msat_diff += htlc.amount_msat as i64;
- }
- false
- } else { true }
- });
- pending_outbound_htlcs.retain(|htlc| {
- if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
- log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", log_bytes!(htlc.payment_hash.0));
- if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
- revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
- } else {
- finalized_claimed_htlcs.push(htlc.source.clone());
- // They fulfilled, so we sent them money
- value_to_self_msat_diff -= htlc.amount_msat as i64;
- }
- false
- } else { true }
- });
- for htlc in pending_inbound_htlcs.iter_mut() {
- let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
- true
- } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
- true
- } else { false };
- if swap {
- let mut state = InboundHTLCState::Committed;
- mem::swap(&mut state, &mut htlc.state);
- if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
- htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
- require_commitment = true;
- } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
- match forward_info {
- PendingHTLCStatus::Fail(fail_msg) => {
- log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", log_bytes!(htlc.payment_hash.0));
- require_commitment = true;
- match fail_msg {
- HTLCFailureMsg::Relay(msg) => {
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
- update_fail_htlcs.push(msg)
- },
- HTLCFailureMsg::Malformed(msg) => {
- htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
- update_fail_malformed_htlcs.push(msg)
- },
- }
- },
- PendingHTLCStatus::Forward(forward_info) => {
- log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", log_bytes!(htlc.payment_hash.0));
- to_forward_infos.push((forward_info, htlc.htlc_id));
- htlc.state = InboundHTLCState::Committed;
- }
- }
- }
- }
- }
- for htlc in pending_outbound_htlcs.iter_mut() {
- if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
- log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", log_bytes!(htlc.payment_hash.0));
- htlc.state = OutboundHTLCState::Committed;
- }
- if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
- log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
- // Grab the preimage, if it exists, instead of cloning
- let mut reason = OutboundHTLCOutcome::Success(None);
- mem::swap(outcome, &mut reason);
- htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
- require_commitment = true;
- }
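+ // Outbound HTLCs still in LocalAnnounced were part of the commitment update we're
+ // regenerating, so re-build their update_add_htlc messages.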
+ for htlc in self.context.pending_outbound_htlcs.iter() {
+ if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
+ update_add_htlcs.push(msgs::UpdateAddHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ amount_msat: htlc.amount_msat,
+ payment_hash: htlc.payment_hash,
+ cltv_expiry: htlc.cltv_expiry,
+ onion_routing_packet: (**onion_packet).clone(),
+ });
}
}
- self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
- if let Some((feerate, update_state)) = self.context.pending_update_fee {
- match update_state {
- FeeUpdateState::Outbound => {
- debug_assert!(self.context.is_outbound());
- log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
- self.context.feerate_per_kw = feerate;
- self.context.pending_update_fee = None;
- },
- FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
- FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
- debug_assert!(!self.context.is_outbound());
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
- require_commitment = true;
- self.context.feerate_per_kw = feerate;
- self.context.pending_update_fee = None;
- },
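+ // Likewise, re-build the removal messages (fail, fail_malformed, fulfill) for inbound HTLCs
+ // we had locally removed in that same update.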
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
+ match reason {
+ &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
+ update_fail_htlcs.push(msgs::UpdateFailHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ reason: err_packet.clone()
+ });
+ },
+ &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
+ update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ sha256_of_onion: sha256_of_onion.clone(),
+ failure_code: failure_code.clone(),
+ });
+ },
+ &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
+ update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
+ channel_id: self.context.channel_id(),
+ htlc_id: htlc.htlc_id,
+ payment_preimage: payment_preimage.clone(),
+ });
+ },
+ }
}
}
- if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
- // We can't actually generate a new commitment transaction (incl by freeing holding
- // cells) while we can't update the monitor, so we just return what we have.
- if require_commitment {
- self.context.monitor_pending_commitment_signed = true;
- // When the monitor updating is restored we'll call get_last_commitment_update(),
- // which does not update state, but we're definitely now awaiting a remote revoke
- // before we can step forward any more, so set it here.
- let mut additional_update = self.build_commitment_no_status_check(logger);
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
- }
- self.context.monitor_pending_forwards.append(&mut to_forward_infos);
- self.context.monitor_pending_failures.append(&mut revoked_htlcs);
- self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
- log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id()));
- return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update)));
+ let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
+ Some(msgs::UpdateFee {
+ channel_id: self.context.channel_id(),
+ feerate_per_kw: self.context.pending_update_fee.unwrap().0,
+ })
+ } else { None };
+
+ log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
+ log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
+ update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
+ msgs::CommitmentUpdate {
+ update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
+ commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
}
+ }
- match self.free_holding_cell_htlcs(logger) {
- (Some(_), htlcs_to_fail) => {
- let mut additional_update = self.context.pending_monitor_updates.pop().unwrap().update;
- // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
-
- self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
- },
- (None, htlcs_to_fail) => {
- if require_commitment {
- let mut additional_update = self.build_commitment_no_status_check(logger);
-
- // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
- // strictly increasing by one, so decrement it here.
- self.context.latest_monitor_update_id = monitor_update.update_id;
- monitor_update.updates.append(&mut additional_update.updates);
-
- log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
- log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
- self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
- } else {
- log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id()));
- self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
- Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update)))
- }
- }
- }
- }
-
- /// Queues up an outbound update fee by placing it in the holding cell. You should call
- /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
- /// commitment update.
- pub fn queue_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, logger: &L) where L::Target: Logger {
- let msg_opt = self.send_update_fee(feerate_per_kw, true, logger);
- assert!(msg_opt.is_none(), "We forced holding cell?");
- }
-
- /// Adds a pending update to this channel. See the doc for send_htlc for
- /// further details on the optionness of the return value.
- /// If our balance is too low to cover the cost of the next commitment transaction at the
- /// new feerate, the update is cancelled.
+ /// May panic if calls other than message-handling calls (which will all `Err` immediately)
+ /// have been made between `remove_uncommitted_htlcs_and_mark_paused` and this call.
///
- /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
- /// [`Channel`] if `force_holding_cell` is false.
- fn send_update_fee<L: Deref>(&mut self, feerate_per_kw: u32, mut force_holding_cell: bool, logger: &L) -> Option<msgs::UpdateFee> where L::Target: Logger {
- if !self.context.is_outbound() {
- panic!("Cannot send fee from inbound channel");
- }
- if !self.context.is_usable() {
- panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
- }
- if !self.context.is_live() {
- panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
- }
-
- // Before proposing a feerate update, check that we can actually afford the new fee.
- let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
- let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
- let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
- let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.opt_anchors()) * 1000;
- let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
- if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
- //TODO: auto-close after a number of failures?
- log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
- return None;
+ /// Some links printed in log lines are included here to check them during build (when run with
+ /// `cargo doc --document-private-items`):
+ /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
+ /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
+ pub fn channel_reestablish<L: Deref, NS: Deref>(
+ &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
+ genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
+ ) -> Result<ReestablishResponses, ChannelError>
+ where
+ L::Target: Logger,
+ NS::Target: NodeSigner
+ {
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
+ // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
+ // almost certainly indicates we are going to end up out-of-sync in some way, so we
+ // just close here instead of trying to recover.
+ return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
}
- // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
- let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
- let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
- log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
- return None;
- }
- if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
- log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
- return None;
+ if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
+ msg.next_local_commitment_number == 0 {
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
}
- if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
- force_holding_cell = true;
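+ // If the peer claims to have seen at least one of our commitments, check that the last
+ // per-commitment secret they echo back matches the one we would have revealed at that height.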
+ if msg.next_remote_commitment_number > 0 {
+ let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
+ let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
+ .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+ if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
+ return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+ }
+ if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+ macro_rules! log_and_panic {
+ ($err_msg: expr) => {
+ log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
+ panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
+ }
+ }
+ log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+ This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+ More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+ If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+ ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+ ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+ Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+ See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
+ }
}
- if force_holding_cell {
- self.context.holding_cell_update_fee = Some(feerate_per_kw);
- return None;
+ // Before we change the state of the channel, we check if the peer is sending a very old
+ // commitment transaction number; if so, we send a warning message.
+ let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
+ if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
+ return Err(
+ ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
+ );
}
- debug_assert!(self.context.pending_update_fee.is_none());
- self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
+ // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
+ // remaining cases either succeed or ErrorMessage-fail).
+ self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
+ self.context.sent_message_awaiting_response = None;
- Some(msgs::UpdateFee {
- channel_id: self.context.channel_id,
- feerate_per_kw,
- })
- }
+ let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ Some(msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ })
+ } else { None };
- /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
- /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
- /// resent.
- /// No further message handling calls may be made until a channel_reestablish dance has
- /// completed.
- pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if self.context.channel_state < ChannelState::FundingSent as u32 {
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- return;
- }
+ let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
- // While the below code should be idempotent, it's simpler to just return early, as
- // redundant disconnect events can fire, though they should be rare.
- return;
- }
+ if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
+ // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
+ if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
+ self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ if msg.next_remote_commitment_number != 0 {
+ return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
+ }
+ // Short circuit the whole handler as there is nothing we can resend them
+ return Ok(ReestablishResponses {
+ channel_ready: None,
+ raa: None, commitment_update: None,
+ order: RAACommitmentOrder::CommitmentFirst,
+ shutdown_msg, announcement_sigs,
+ });
+ }
- if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
- self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
+ // We have OurChannelReady set!
+ let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ return Ok(ReestablishResponses {
+ channel_ready: Some(msgs::ChannelReady {
+ channel_id: self.context.channel_id(),
+ next_per_commitment_point,
+ short_channel_id_alias: Some(self.context.outbound_scid_alias),
+ }),
+ raa: None, commitment_update: None,
+ order: RAACommitmentOrder::CommitmentFirst,
+ shutdown_msg, announcement_sigs,
+ });
}
- // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
- // will be retransmitted.
- self.context.last_sent_closing_fee = None;
- self.context.pending_counterparty_closing_signed = None;
- self.context.closing_fee_limits = None;
-
- let mut inbound_drop_count = 0;
- self.context.pending_inbound_htlcs.retain(|htlc| {
- match htlc.state {
- InboundHTLCState::RemoteAnnounced(_) => {
- // They sent us an update_add_htlc but we never got the commitment_signed.
- // We'll tell them what commitment_signed we're expecting next and they'll drop
- // this HTLC accordingly
- inbound_drop_count += 1;
- false
- },
- InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
- // We received a commitment_signed updating this HTLC and (at least hopefully)
- // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
- // in response to it yet, so don't touch it.
- true
- },
- InboundHTLCState::Committed => true,
- InboundHTLCState::LocalRemoved(_) => {
- // We (hopefully) sent a commitment_signed updating this HTLC (which we can
- // re-transmit if needed) and they may have even sent a revoke_and_ack back
- // (that we missed). Keep this around for now and if they tell us they missed
- // the commitment_signed we can re-transmit the update then.
- true
- },
+ let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
+ // Remote isn't waiting on any RevokeAndACK from us!
+ // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
+ None
+ } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
+ if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ self.context.monitor_pending_revoke_and_ack = true;
+ None
+ } else {
+ Some(self.get_last_revoke_and_ack())
}
- });
- self.context.next_counterparty_htlc_id -= inbound_drop_count;
+ } else {
+ return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
+ };
- if let Some((_, update_state)) = self.context.pending_update_fee {
- if update_state == FeeUpdateState::RemoteAnnounced {
- debug_assert!(!self.context.is_outbound());
- self.context.pending_update_fee = None;
- }
+ // We increment cur_counterparty_commitment_transaction_number only upon receipt of
+ // revoke_and_ack, not on sending commitment_signed, so we add one if we have
+ // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
+ // the corresponding revoke_and_ack back yet.
+ let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
+ if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
+ self.mark_awaiting_response();
}
+ let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
- // They sent us an update to remove this but haven't yet sent the corresponding
- // commitment_signed, we need to move it back to Committed and they can re-send
- // the update upon reconnection.
- htlc.state = OutboundHTLCState::Committed;
- }
- }
-
- self.context.sent_message_awaiting_response = None;
-
- self.context.channel_state |= ChannelState::PeerDisconnected as u32;
- log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, log_bytes!(self.context.channel_id()));
- }
-
- /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
- /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
- /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
- /// update completes (potentially immediately).
- /// The messages which were generated with the monitor update must *not* have been sent to the
- /// remote end, and must instead have been dropped. They will be regenerated when
- /// [`Self::monitor_updating_restored`] is called.
- ///
- /// [`ChannelManager`]: super::channelmanager::ChannelManager
- /// [`chain::Watch`]: crate::chain::Watch
- /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
- fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
- resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
- mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
- mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
- ) {
- self.context.monitor_pending_revoke_and_ack |= resend_raa;
- self.context.monitor_pending_commitment_signed |= resend_commitment;
- self.context.monitor_pending_channel_ready |= resend_channel_ready;
- self.context.monitor_pending_forwards.append(&mut pending_forwards);
- self.context.monitor_pending_failures.append(&mut pending_fails);
- self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
- self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
- }
-
- /// Indicates that the latest ChannelMonitor update has been committed by the client
- /// successfully and we should restore normal operation. Returns messages which should be sent
- /// to the remote side.
- pub fn monitor_updating_restored<L: Deref, NS: Deref>(
- &mut self, logger: &L, node_signer: &NS, genesis_block_hash: BlockHash,
- user_config: &UserConfig, best_block_height: u32
- ) -> MonitorRestoreUpdates
- where
- L::Target: Logger,
- NS::Target: NodeSigner
- {
- assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
- self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
- let mut found_blocked = false;
- self.context.pending_monitor_updates.retain(|upd| {
- if found_blocked { debug_assert!(upd.blocked, "No mons may be unblocked after a blocked one"); }
- if upd.blocked { found_blocked = true; }
- upd.blocked
- });
-
- // If we're past (or at) the FundingSent stage on an outbound channel, try to
- // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
- // first received the funding_signed.
- let mut funding_broadcastable =
- if self.context.is_outbound() && self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::FundingSent as u32 {
- self.context.funding_transaction.take()
- } else { None };
- // That said, if the funding transaction is already confirmed (ie we're active with a
- // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
- if self.context.channel_state & !MULTI_STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
- funding_broadcastable = None;
- }
-
- // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
- // (and we assume the user never directly broadcasts the funding transaction and waits for
- // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
- // * an inbound channel that failed to persist the monitor on funding_created and we got
- // the funding transaction confirmed before the monitor was persisted, or
- // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
- let channel_ready = if self.context.monitor_pending_channel_ready {
- assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
- "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
- self.context.monitor_pending_channel_ready = false;
+ let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
+ // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
Some(msgs::ChannelReady {
channel_id: self.context.channel_id(),
next_per_commitment_point,
short_channel_id_alias: Some(self.context.outbound_scid_alias),
})
} else { None };
- let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block_height, logger);
+ if msg.next_local_commitment_number == next_counterparty_commitment_number {
+ if required_revoke.is_some() {
+ log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
+ } else {
+ log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
+ }
- let mut accepted_htlcs = Vec::new();
- mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
- let mut failed_htlcs = Vec::new();
- mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
- let mut finalized_claimed_htlcs = Vec::new();
- mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ raa: required_revoke,
+ commitment_update: None,
+ order: self.context.resend_order.clone(),
+ })
+ } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
+ if required_revoke.is_some() {
+ log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
+ } else {
+ log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
+ }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
- self.context.monitor_pending_revoke_and_ack = false;
- self.context.monitor_pending_commitment_signed = false;
- return MonitorRestoreUpdates {
- raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
- accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
- };
+ if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ self.context.monitor_pending_commitment_signed = true;
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ commitment_update: None, raa: None,
+ order: self.context.resend_order.clone(),
+ })
+ } else {
+ Ok(ReestablishResponses {
+ channel_ready, shutdown_msg, announcement_sigs,
+ raa: required_revoke,
+ commitment_update: Some(self.get_last_commitment_update(logger)),
+ order: self.context.resend_order.clone(),
+ })
+ }
+ } else {
+ Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
}
+ }
- let raa = if self.context.monitor_pending_revoke_and_ack {
- Some(self.get_last_revoke_and_ack())
- } else { None };
- let commitment_update = if self.context.monitor_pending_commitment_signed {
- self.mark_awaiting_response();
- Some(self.get_last_commitment_update(logger))
- } else { None };
+ /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
+ /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
+ /// at which point they will be recalculated.
+ fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
+ -> (u64, u64)
+ where F::Target: FeeEstimator
+ {
+ if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
- self.context.monitor_pending_revoke_and_ack = false;
- self.context.monitor_pending_commitment_signed = false;
- let order = self.context.resend_order.clone();
- log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
- log_bytes!(self.context.channel_id()), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
- if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
- match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
- MonitorRestoreUpdates {
- raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
+ // Propose a range from our current Background feerate to our Normal feerate plus our
+ // force_close_avoidance_max_fee_satoshis.
+ // If we fail to come to consensus, we'll have to force-close.
+ let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
+ let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
+
+ // The spec requires that (when the channel does not have anchors) we only send absolute
+ // channel fees no greater than the absolute channel fee on the current commitment
+ // transaction. It's unclear *which* commitment transaction this refers to, and there isn't a
+ // very good reason to apply such a limit in any case. We don't bother doing so, risking
+ // some force-closure by old nodes, but we wanted to close the channel anyway.
+
+ if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
+ let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
+ proposed_feerate = cmp::max(proposed_feerate, min_feerate);
+ proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
}
+
+ // Note that technically we could end up with a lower minimum fee if one side's balance is
+ // below our dust limit, causing the output to disappear. We don't bother handling this
+ // case, however, as this should only happen if a channel is closed before any (material)
+ // payments have been made on it. This may cause slight fee overpayment and/or failure to
+ // come to consensus with our counterparty on appropriate fees; however, it should be a
+ // relatively rare case. We can revisit this later, though note that in order to determine
+ // if the funder's output is dust we have to know the absolute fee we're going to use.
+ let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
+ let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
+ let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
+ // We always add force_close_avoidance_max_fee_satoshis to our normal
+ // feerate-calculated fee, but allow the max to be overridden if we're using a
+ // target feerate-calculated fee.
+ cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
+ proposed_max_feerate as u64 * tx_weight / 1000)
+ } else {
+ self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
+ };
+
+ self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
+ self.context.closing_fee_limits.clone().unwrap()
}
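// A minimal, standalone sketch of the arithmetic above (hypothetical numbers and names, not
// LDK API): a fee is `feerate_per_kw * tx_weight / 1000` satoshis, the funder's max is the
// Normal-feerate fee plus `force_close_avoidance_max_fee_satoshis`, and the non-funder simply
// caps the max at everything that isn't its own balance.
fn closing_fee_range_sketch(
	background_feerate_per_kw: u64, normal_feerate_per_kw: u64, tx_weight: u64,
	force_close_avoidance_max_fee_satoshis: u64,
) -> (u64, u64) {
	let min_fee_sats = background_feerate_per_kw * tx_weight / 1000;
	let max_fee_sats = normal_feerate_per_kw * tx_weight / 1000
		+ force_close_avoidance_max_fee_satoshis;
	(min_fee_sats, max_fee_sats)
}
// For example, a ~700-weight closing transaction at 253 sat/kW (background) and 2500 sat/kW
// (normal) with the default 1000 sat avoidance bump yields a range of roughly 177 to 2750 sats.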
- pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
- where F::Target: FeeEstimator, L::Target: Logger
- {
- if self.context.is_outbound() {
- return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
- }
- Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
- let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
+ /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
+ /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
+ /// this point if we're the funder we should send the initial closing_signed, and in any case
+ /// shutdown should complete within a reasonable timeframe.
+ fn closing_negotiation_ready(&self) -> bool {
+ self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
+ self.context.channel_state &
+ (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
+ ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
+ == BOTH_SIDES_SHUTDOWN_MASK &&
+ self.context.pending_update_fee.is_none()
+ }
- self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
- self.context.update_time_counter += 1;
- // If the feerate has increased over the previous dust buffer (note that
- // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
- // won't be pushed over our dust exposure limit by the feerate increase.
- if feerate_over_dust_buffer {
- let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
- let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
- let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
- let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
- if holder_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
- return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
- msg.feerate_per_kw, holder_tx_dust_exposure)));
- }
- if counterparty_tx_dust_exposure > self.context.get_max_dust_htlc_exposure_msat() {
- return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
- msg.feerate_per_kw, counterparty_tx_dust_exposure)));
+ /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
+ /// an Err if no progress is being made and the channel should be force-closed instead.
+ /// Should be called on a one-minute timer.
+ pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
+ if self.closing_negotiation_ready() {
+ if self.context.closing_signed_in_flight {
+ return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
+ } else {
+ self.context.closing_signed_in_flight = true;
}
}
Ok(())
}
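// Illustrative cadence only (not ChannelManager code): callers invoke the method above once per
// timer tick, roughly a minute apart. The first tick where `closing_negotiation_ready()` holds
// arms `closing_signed_in_flight`; a second such tick without the negotiation having completed
// yields the Err above, which callers treat as a force-close trigger.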
- fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
- let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let per_commitment_secret = self.context.holder_signer.release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
- msgs::RevokeAndACK {
- channel_id: self.context.channel_id,
- per_commitment_secret,
- next_per_commitment_point,
- #[cfg(taproot)]
- next_local_nonce: None,
- }
- }
-
- fn get_last_commitment_update<L: Deref>(&self, logger: &L) -> msgs::CommitmentUpdate where L::Target: Logger {
- let mut update_add_htlcs = Vec::new();
- let mut update_fulfill_htlcs = Vec::new();
- let mut update_fail_htlcs = Vec::new();
- let mut update_fail_malformed_htlcs = Vec::new();
-
- for htlc in self.context.pending_outbound_htlcs.iter() {
- if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
- update_add_htlcs.push(msgs::UpdateAddHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc.htlc_id,
- amount_msat: htlc.amount_msat,
- payment_hash: htlc.payment_hash,
- cltv_expiry: htlc.cltv_expiry,
- onion_routing_packet: (**onion_packet).clone(),
- });
- }
+ pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
+ where F::Target: FeeEstimator, L::Target: Logger
+ {
+ if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
+ return Ok((None, None));
}
- for htlc in self.context.pending_inbound_htlcs.iter() {
- if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
- match reason {
- &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
- update_fail_htlcs.push(msgs::UpdateFailHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc.htlc_id,
- reason: err_packet.clone()
- });
- },
- &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
- update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc.htlc_id,
- sha256_of_onion: sha256_of_onion.clone(),
- failure_code: failure_code.clone(),
- });
- },
- &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
- update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
- channel_id: self.context.channel_id(),
- htlc_id: htlc.htlc_id,
- payment_preimage: payment_preimage.clone(),
- });
- },
- }
+ if !self.context.is_outbound() {
+ if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
+ return self.closing_signed(fee_estimator, &msg);
}
+ return Ok((None, None));
}
- let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
- Some(msgs::UpdateFee {
- channel_id: self.context.channel_id(),
- feerate_per_kw: self.context.pending_update_fee.unwrap().0,
- })
- } else { None };
+ let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
- log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
- log_bytes!(self.context.channel_id()), if update_fee.is_some() { " update_fee," } else { "" },
- update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
- msgs::CommitmentUpdate {
- update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
- commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0,
- }
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
+ log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
+ our_min_fee, our_max_fee, total_fee_satoshis);
+
+ let sig = self.context.holder_signer
+ .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
+ .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
+
+ self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
+ Ok((Some(msgs::ClosingSigned {
+ channel_id: self.context.channel_id,
+ fee_satoshis: total_fee_satoshis,
+ signature: sig,
+ fee_range: Some(msgs::ClosingSignedFeeRange {
+ min_fee_satoshis: our_min_fee,
+ max_fee_satoshis: our_max_fee,
+ }),
+ }), None))
}
- /// May panic if some calls other than message-handling calls (which will all Err immediately)
- /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
+ // Marks a channel as waiting for a response from the counterparty. If it's not received
+ // within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] timer ticks after sending our own message
+ // to them, then we'll attempt a reconnection.
+ fn mark_awaiting_response(&mut self) {
+ self.context.sent_message_awaiting_response = Some(0);
+ }
+
+ /// Determines whether we should disconnect the counterparty due to not receiving a response
+ /// within our expected timeframe.
///
- /// Some links printed in log lines are included here to check them during build (when run with
- /// `cargo doc --document-private-items`):
- /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
- /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
- pub fn channel_reestablish<L: Deref, NS: Deref>(
- &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
- genesis_block_hash: BlockHash, user_config: &UserConfig, best_block: &BestBlock
- ) -> Result<ReestablishResponses, ChannelError>
- where
- L::Target: Logger,
- NS::Target: NodeSigner
+ /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
+ pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
+ let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
+ ticks_elapsed
+ } else {
+ // Don't disconnect when we're not waiting on a response.
+ return false;
+ };
+ *ticks_elapsed += 1;
+ *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
+ }
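// A self-contained sketch of the counter pattern above (the constant value here is
// illustrative; the real threshold is `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`): `None`
// means we aren't waiting on the peer, `Some(n)` counts timer ticks since we sent the
// message that expects a reply, and we ask for a disconnect once the threshold is hit.
const AWAITING_RESPONSE_TICKS_SKETCH: u64 = 2;
fn should_disconnect_sketch(sent_message_awaiting_response: &mut Option<u64>) -> bool {
	match sent_message_awaiting_response.as_mut() {
		// Nothing outstanding, so never disconnect for this reason.
		None => false,
		Some(ticks_elapsed) => {
			*ticks_elapsed += 1;
			*ticks_elapsed >= AWAITING_RESPONSE_TICKS_SKETCH
		}
	}
}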
+
+ pub fn shutdown<SP: Deref>(
+ &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
+ ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ where SP::Target: SignerProvider
{
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
- // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
- // almost certainly indicates we are going to end up out-of-sync in some way, so we
- // just close here instead of trying to recover.
- return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
+ }
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ // Spec says we should fail the connection, not the channel, but that's nonsense, there
+ // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
+ // can do that via error message without getting a connection fail anyway...
+ return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
+ }
+ for htlc in self.context.pending_inbound_htlcs.iter() {
+ if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
+ return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
+ }
}
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
- msg.next_local_commitment_number == 0 {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish (usually an lnd node with lost state asking us to force-close for them)".to_owned()));
+ if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
+ return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
}
- if msg.next_remote_commitment_number > 0 {
- let expected_point = self.context.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
- let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
- .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
- if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
- return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
- }
- if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
- macro_rules! log_and_panic {
- ($err_msg: expr) => {
- log_error!(logger, $err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
- panic!($err_msg, log_bytes!(self.context.channel_id), log_pubkey!(self.context.counterparty_node_id));
- }
- }
- log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
- This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
- More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
- If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
- ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
- ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
- Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
- See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
+ if self.context.counterparty_shutdown_scriptpubkey.is_some() {
+ if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
+ return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
}
+ } else {
+ self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
}
- // Before we change the state of the channel, we check if the peer is sending a very old
- // commitment transaction number, if yes we send a warning message.
- let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
- if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
- return Err(
- ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
- );
- }
+ // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
+ // immediately after the commitment dance, but we can send a Shutdown because we won't send
+ // any further commitment updates after we set LocalShutdownSent.
+ let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
- // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
- // remaining cases either succeed or ErrorMessage-fail).
- self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
- self.context.sent_message_awaiting_response = None;
+ let update_shutdown_script = match self.context.shutdown_scriptpubkey {
+ Some(_) => false,
+ None => {
+ assert!(send_shutdown);
+ let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+ };
+ if !shutdown_scriptpubkey.is_compatible(their_features) {
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
+ }
+ self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+ true
+ },
+ };
- let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
- assert!(self.context.shutdown_scriptpubkey.is_some());
+ // From here on out, we may not fail!
+
+ self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
+ self.context.update_time_counter += 1;
+
+ let monitor_update = if update_shutdown_script {
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ }],
+ };
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ if self.push_blockable_mon_update(monitor_update) {
+ self.context.pending_monitor_updates.last().map(|upd| &upd.update)
+ } else { None }
+ } else { None };
+ let shutdown = if send_shutdown {
Some(msgs::Shutdown {
channel_id: self.context.channel_id,
scriptpubkey: self.get_closing_scriptpubkey(),
})
} else { None };
- let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
-
- if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
- // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
- if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
- self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
- if msg.next_remote_commitment_number != 0 {
- return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
- }
- // Short circuit the whole handler as there is nothing we can resend them
- return Ok(ReestablishResponses {
- channel_ready: None,
- raa: None, commitment_update: None,
- order: RAACommitmentOrder::CommitmentFirst,
- shutdown_msg, announcement_sigs,
- });
+ // We can't send our shutdown until we've committed all of our pending HTLCs, but the
+ // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
+ // cell HTLCs and return them to fail the payment.
+ self.context.holding_cell_update_fee = None;
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
+ self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
+ dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
+ false
+ },
+ _ => true
}
+ });
- // We have OurChannelReady set!
- let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- return Ok(ReestablishResponses {
- channel_ready: Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- }),
- raa: None, commitment_update: None,
- order: RAACommitmentOrder::CommitmentFirst,
- shutdown_msg, announcement_sigs,
- });
- }
-
- let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
- // Remote isn't waiting on any RevokeAndACK from us!
- // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
- None
- } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
- self.context.monitor_pending_revoke_and_ack = true;
- None
- } else {
- Some(self.get_last_revoke_and_ack())
- }
- } else {
- return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
- };
-
- // We increment cur_counterparty_commitment_transaction_number only upon receipt of
- // revoke_and_ack, not on sending commitment_signed, so we add one if have
- // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
- // the corresponding revoke_and_ack back yet.
- let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
- if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
- self.mark_awaiting_response();
- }
- let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
+ self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
+ self.context.update_time_counter += 1;
- let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
- // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
- let next_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- Some(msgs::ChannelReady {
- channel_id: self.context.channel_id(),
- next_per_commitment_point,
- short_channel_id_alias: Some(self.context.outbound_scid_alias),
- })
- } else { None };
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
+ }
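// Standalone sketch of the holding-cell draining just above (simplified, hypothetical types):
// queued `AddHTLC` updates that were never committed are pulled out so their payments can be
// failed back to the sender, while every other queued update stays put.
enum QueuedUpdateSketch {
	AddHtlc { payment_hash: [u8; 32] },
	Other,
}
fn drain_holding_cell_sketch(holding_cell: &mut Vec<QueuedUpdateSketch>) -> Vec<[u8; 32]> {
	let mut dropped_payment_hashes = Vec::new();
	holding_cell.retain(|upd| match upd {
		QueuedUpdateSketch::AddHtlc { payment_hash } => {
			dropped_payment_hashes.push(*payment_hash);
			false // drop it: this add never made it into a commitment
		},
		QueuedUpdateSketch::Other => true, // keep fulfills/fails/etc.
	});
	dropped_payment_hashes
}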
- if msg.next_local_commitment_number == next_counterparty_commitment_number {
- if required_revoke.is_some() {
- log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.context.channel_id()));
- } else {
- log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.context.channel_id()));
- }
+ fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
+ let mut tx = closing_tx.trust().built_transaction().clone();
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: None,
- order: self.context.resend_order.clone(),
- })
- } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
- if required_revoke.is_some() {
- log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.context.channel_id()));
- } else {
- log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", log_bytes!(self.context.channel_id()));
- }
+ tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
- if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
- self.context.monitor_pending_commitment_signed = true;
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- commitment_update: None, raa: None,
- order: self.context.resend_order.clone(),
- })
- } else {
- Ok(ReestablishResponses {
- channel_ready, shutdown_msg, announcement_sigs,
- raa: required_revoke,
- commitment_update: Some(self.get_last_commitment_update(logger)),
- order: self.context.resend_order.clone(),
- })
- }
+ let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
+ let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
+ let mut holder_sig = sig.serialize_der().to_vec();
+ holder_sig.push(EcdsaSighashType::All as u8);
+ let mut cp_sig = counterparty_sig.serialize_der().to_vec();
+ cp_sig.push(EcdsaSighashType::All as u8);
+ if funding_key[..] < counterparty_funding_key[..] {
+ tx.input[0].witness.push(holder_sig);
+ tx.input[0].witness.push(cp_sig);
} else {
- Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
+ tx.input[0].witness.push(cp_sig);
+ tx.input[0].witness.push(holder_sig);
}
+
+ tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
+ tx
}
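// A minimal sketch of the witness ordering above (plain byte vectors stand in for the real
// serialized keys and DER signatures): per BOLT 3 the two funding pubkeys appear in the 2-of-2
// witness script in lexicographical order, so the signatures must be pushed in that same order,
// with an empty leading element for the classic OP_CHECKMULTISIG off-by-one and the witness
// script itself pushed last.
fn closing_witness_order_sketch(
	holder_funding_key: &[u8], counterparty_funding_key: &[u8],
	holder_sig: Vec<u8>, counterparty_sig: Vec<u8>, witness_script: Vec<u8>,
) -> Vec<Vec<u8>> {
	let mut witness = vec![Vec::new()]; // CHECKMULTISIG dummy element
	if holder_funding_key < counterparty_funding_key {
		witness.push(holder_sig);
		witness.push(counterparty_sig);
	} else {
		witness.push(counterparty_sig);
		witness.push(holder_sig);
	}
	witness.push(witness_script);
	witness
}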
- /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
- /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
- /// at which point they will be recalculated.
- fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
- -> (u64, u64)
+ pub fn closing_signed<F: Deref>(
+ &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
+ -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
where F::Target: FeeEstimator
{
- if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
-
- // Propose a range from our current Background feerate to our Normal feerate plus our
- // force_close_avoidance_max_fee_satoshis.
- // If we fail to come to consensus, we'll have to force-close.
- let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
- let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
- let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
-
- // The spec requires that (when the channel does not have anchors) we only send absolute
- // channel fees no greater than the absolute channel fee on the current commitment
- // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
- // very good reason to apply such a limit in any case. We don't bother doing so, risking
- // some force-closure by old nodes, but we wanted to close the channel anyway.
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
+ return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
+ }
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
+ return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
+ }
+ if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
+ return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
+ }
+ if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
+ return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
+ }
- if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
- let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
- proposed_feerate = cmp::max(proposed_feerate, min_feerate);
- proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
+ if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
+ return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
}
- // Note that technically we could end up with a lower minimum fee if one sides' balance is
- // below our dust limit, causing the output to disappear. We don't bother handling this
- // case, however, as this should only happen if a channel is closed before any (material)
- // payments have been made on it. This may cause slight fee overpayment and/or failure to
- // come to consensus with our counterparty on appropriate fees, however it should be a
- // relatively rare case. We can revisit this later, though note that in order to determine
- // if the funders' output is dust we have to know the absolute fee we're going to use.
- let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
- let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
- let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
- // We always add force_close_avoidance_max_fee_satoshis to our normal
- // feerate-calculated fee, but allow the max to be overridden if we're using a
- // target feerate-calculated fee.
- cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
- proposed_max_feerate as u64 * tx_weight / 1000)
- } else {
- self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
- };
+ if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
+ self.context.pending_counterparty_closing_signed = Some(msg.clone());
+ return Ok((None, None));
+ }
- self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
- self.context.closing_fee_limits.clone().unwrap()
- }
+ let funding_redeemscript = self.context.get_funding_redeemscript();
+ let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
+ if used_total_fee != msg.fee_satoshis {
+ return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
+ }
+ let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
- /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
- /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
- /// this point if we're the funder we should send the initial closing_signed, and in any case
- /// shutdown should complete within a reasonable timeframe.
- fn closing_negotiation_ready(&self) -> bool {
- self.context.pending_inbound_htlcs.is_empty() && self.context.pending_outbound_htlcs.is_empty() &&
- self.context.channel_state &
- (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32 |
- ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)
- == BOTH_SIDES_SHUTDOWN_MASK &&
- self.context.pending_update_fee.is_none()
- }
+ match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
+ Ok(_) => {},
+ Err(_e) => {
+ // The remote end may have decided to revoke their output due to inconsistent dust
+ // limits, so check for that case by re-checking the signature here.
+ closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
+ let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
+ secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
+ },
+ };
- /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
- /// an Err if no progress is being made and the channel should be force-closed instead.
- /// Should be called on a one-minute timer.
- pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
- if self.closing_negotiation_ready() {
- if self.context.closing_signed_in_flight {
- return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
- } else {
- self.context.closing_signed_in_flight = true;
+ for outp in closing_tx.trust().built_transaction().output.iter() {
+ if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
}
}
- Ok(())
- }
-
- pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
- &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
- -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
- where F::Target: FeeEstimator, L::Target: Logger
- {
- if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
- return Ok((None, None));
- }
- if !self.context.is_outbound() {
- if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
- return self.closing_signed(fee_estimator, &msg);
+ assert!(self.context.shutdown_scriptpubkey.is_some());
+ if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
+ if last_fee == msg.fee_satoshis {
+ let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.update_time_counter += 1;
+ return Ok((None, Some(tx)));
}
- return Ok((None, None));
}
let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
- assert!(self.context.shutdown_scriptpubkey.is_some());
- let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
- log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
- our_min_fee, our_max_fee, total_fee_satoshis);
+ macro_rules! propose_fee {
+ ($new_fee: expr) => {
+ let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
+ (closing_tx, $new_fee)
+ } else {
+ self.build_closing_transaction($new_fee, false)
+ };
- let sig = self.context.holder_signer
- .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
- .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
-
- self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
- Ok((Some(msgs::ClosingSigned {
- channel_id: self.context.channel_id,
- fee_satoshis: total_fee_satoshis,
- signature: sig,
- fee_range: Some(msgs::ClosingSignedFeeRange {
- min_fee_satoshis: our_min_fee,
- max_fee_satoshis: our_max_fee,
- }),
- }), None))
- }
-
- // Marks a channel as waiting for a response from the counterparty. If it's not received
- // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
- // a reconnection.
- fn mark_awaiting_response(&mut self) {
- self.context.sent_message_awaiting_response = Some(0);
- }
-
- /// Determines whether we should disconnect the counterparty due to not receiving a response
- /// within our expected timeframe.
- ///
- /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
- pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
- let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
- ticks_elapsed
- } else {
- // Don't disconnect when we're not waiting on a response.
- return false;
- };
- *ticks_elapsed += 1;
- *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
- }
-
- pub fn shutdown<SP: Deref>(
- &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
- ) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
- where SP::Target: SignerProvider
- {
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
- }
- if self.context.channel_state < ChannelState::FundingSent as u32 {
- // Spec says we should fail the connection, not the channel, but that's nonsense, there
- // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
- // can do that via error message without getting a connection fail anyway...
- return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
- }
- for htlc in self.context.pending_inbound_htlcs.iter() {
- if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
- return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
- }
- }
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
-
- if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
- return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
- }
-
- if self.context.counterparty_shutdown_scriptpubkey.is_some() {
- if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
- return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
- }
- } else {
- self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
- }
-
- // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
- // immediately after the commitment dance, but we can send a Shutdown because we won't send
- // any further commitment updates after we set LocalShutdownSent.
- let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
-
- let update_shutdown_script = match self.context.shutdown_scriptpubkey {
- Some(_) => false,
- None => {
- assert!(send_shutdown);
- let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => scriptpubkey,
- Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
- };
- if !shutdown_scriptpubkey.is_compatible(their_features) {
- return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
- }
- self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
- true
- },
- };
-
- // From here on out, we may not fail!
-
- self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
- self.context.update_time_counter += 1;
-
- let monitor_update = if update_shutdown_script {
- self.context.latest_monitor_update_id += 1;
- let monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
- scriptpubkey: self.get_closing_scriptpubkey(),
- }],
- };
- self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- if self.push_blockable_mon_update(monitor_update) {
- self.context.pending_monitor_updates.last().map(|upd| &upd.update)
- } else { None }
- } else { None };
- let shutdown = if send_shutdown {
- Some(msgs::Shutdown {
- channel_id: self.context.channel_id,
- scriptpubkey: self.get_closing_scriptpubkey(),
- })
- } else { None };
-
- // We can't send our shutdown until we've committed all of our pending HTLCs, but the
- // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
- // cell HTLCs and return them to fail the payment.
- self.context.holding_cell_update_fee = None;
- let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
- self.context.holding_cell_htlc_updates.retain(|htlc_update| {
- match htlc_update {
- &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
- dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
- false
- },
- _ => true
- }
- });
-
- self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
- self.context.update_time_counter += 1;
-
- Ok((shutdown, monitor_update, dropped_outbound_htlcs))
- }
-
- fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
- let mut tx = closing_tx.trust().built_transaction().clone();
-
- tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
-
- let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
- let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
- let mut holder_sig = sig.serialize_der().to_vec();
- holder_sig.push(EcdsaSighashType::All as u8);
- let mut cp_sig = counterparty_sig.serialize_der().to_vec();
- cp_sig.push(EcdsaSighashType::All as u8);
- if funding_key[..] < counterparty_funding_key[..] {
- tx.input[0].witness.push(holder_sig);
- tx.input[0].witness.push(cp_sig);
- } else {
- tx.input[0].witness.push(cp_sig);
- tx.input[0].witness.push(holder_sig);
- }
-
- tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
- tx
- }
-
- pub fn closing_signed<F: Deref>(
- &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
- -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
- where F::Target: FeeEstimator
- {
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
- return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
- }
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
- return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
- }
- if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
- return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
- }
- if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
- return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
- }
-
- if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
- return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
- }
-
- if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
- self.context.pending_counterparty_closing_signed = Some(msg.clone());
- return Ok((None, None));
- }
-
- let funding_redeemscript = self.context.get_funding_redeemscript();
- let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
- if used_total_fee != msg.fee_satoshis {
- return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
- }
- let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
-
- match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
- Ok(_) => {},
- Err(_e) => {
- // The remote end may have decided to revoke their output due to inconsistent dust
- // limits, so check for that case by re-checking the signature here.
- closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
- let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
- secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
- },
- };
-
- for outp in closing_tx.trust().built_transaction().output.iter() {
- if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
- }
- }
-
- assert!(self.context.shutdown_scriptpubkey.is_some());
- if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
- if last_fee == msg.fee_satoshis {
- let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- self.context.update_time_counter += 1;
- return Ok((None, Some(tx)));
- }
- }
-
- let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
-
- macro_rules! propose_fee {
- ($new_fee: expr) => {
- let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
- (closing_tx, $new_fee)
- } else {
- self.build_closing_transaction($new_fee, false)
- };
-
- let sig = self.context.holder_signer
- .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
+ let sig = self.context.holder_signer
+ .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
let signed_tx = if $new_fee == msg.fee_satoshis {
self.context.channel_state = ChannelState::ShutdownComplete as u32;
// We never learned about the funding confirmation anyway, just ignore
Ok(())
}
- }
+ }
+
+ // Methods to get unprompted messages to send to the remote end (or where we already returned
+ // something in the handler for the message that prompted this message):
+
+ pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
+ if !self.context.is_outbound() {
+ panic!("Tried to open a channel for an inbound channel?");
+ }
+ if self.context.channel_state != ChannelState::OurInitSent as u32 {
+ panic!("Cannot generate an open_channel after we've moved forward");
+ }
+
+ if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Tried to send an open_channel for a channel that has already advanced");
+ }
+
+ let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let keys = self.context.get_holder_pubkeys();
+
+ msgs::OpenChannel {
+ chain_hash,
+ temporary_channel_id: self.context.channel_id,
+ funding_satoshis: self.context.channel_value_satoshis,
+ push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ feerate_per_kw: self.context.feerate_per_kw as u32,
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint,
+ payment_point: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint,
+ htlc_basepoint: keys.htlc_basepoint,
+ first_per_commitment_point,
+ channel_flags: if self.context.config.announced_channel {1} else {0},
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ }
+ }
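// Sketch of the `push_msat` arithmetic above (illustrative numbers): whatever part of the
// channel value we don't keep for ourselves is "pushed" to the counterparty at open, in msat.
fn push_msat_sketch(channel_value_satoshis: u64, value_to_self_msat: u64) -> u64 {
	channel_value_satoshis * 1000 - value_to_self_msat
}
// e.g. a 1_000_000 sat channel where we keep 990_000_000 msat pushes 10_000_000 msat
// (10_000 sats) to the peer.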
+
+ pub fn inbound_is_awaiting_accept(&self) -> bool {
+ self.context.inbound_awaiting_accept
+ }
+
+ /// Sets this channel to accepting 0conf, must be done before `accept_inbound_channel`
+ pub fn set_0conf(&mut self) {
+ assert!(self.context.inbound_awaiting_accept);
+ self.context.minimum_depth = Some(0);
+ }
+
+ /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
+ /// should be sent back to the counterparty node.
+ ///
+ /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+ pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
+ if self.context.is_outbound() {
+ panic!("Tried to send accept_channel for an outbound channel?");
+ }
+ if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
+ panic!("Tried to send accept_channel after channel had moved forward");
+ }
+ if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Tried to send an accept_channel for a channel that has already advanced");
+ }
+ if !self.context.inbound_awaiting_accept {
+ panic!("The inbound channel has already been accepted");
+ }
+
+ self.context.user_id = user_id;
+ self.context.inbound_awaiting_accept = false;
+
+ self.generate_accept_channel_message()
+ }
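+
+ // A minimal caller-side sketch of manually accepting an inbound channel; the names `chan`,
+ // `user_id` and `trust_unconfirmed` are illustrative and not part of this module:
+ //
+ //     if chan.inbound_is_awaiting_accept() {
+ //         if trust_unconfirmed { chan.set_0conf(); }
+ //         let accept_msg = chan.accept_inbound_channel(user_id);
+ //         // forward `accept_msg` to the counterparty via the message handler
+ //     }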
+
+ /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
+ /// inbound channel. If the intention is to accept an inbound channel, use
+ /// [`Channel::accept_inbound_channel`] instead.
+ ///
+ /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+ fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
+ let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
+ let keys = self.context.get_holder_pubkeys();
+
+ msgs::AcceptChannel {
+ temporary_channel_id: self.context.channel_id,
+ dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
+ max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
+ channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
+ htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
+ minimum_depth: self.context.minimum_depth.unwrap(),
+ to_self_delay: self.context.get_holder_selected_contest_delay(),
+ max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
+ funding_pubkey: keys.funding_pubkey,
+ revocation_basepoint: keys.revocation_basepoint,
+ payment_point: keys.payment_point,
+ delayed_payment_basepoint: keys.delayed_payment_basepoint,
+ htlc_basepoint: keys.htlc_basepoint,
+ first_per_commitment_point,
+ shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
+ Some(script) => script.clone().into_inner(),
+ None => Builder::new().into_script(),
+ }),
+ channel_type: Some(self.context.channel_type.clone()),
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ }
+ }
+
+ /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
+ /// inbound channel without accepting it.
+ ///
+ /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
+ #[cfg(test)]
+ pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
+ self.generate_accept_channel_message()
+ }
+
+ /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
+ fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
+ Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
+ }
+
+ /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
+ /// a funding_created message for the remote peer.
+ /// Panics if called at some time other than immediately after initial handshake, if called twice,
+ /// or if called on an inbound channel.
+ /// Note that channel_id changes during this call!
+ /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
+ /// If an Err is returned, it is a ChannelError::Close.
+ pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
+ if !self.context.is_outbound() {
+ panic!("Tried to create outbound funding_created message on an inbound channel!");
+ }
+ if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
+ panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
+ }
+ if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
+ self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
+ self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
+ panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ }
+
+ self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+ self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+
+ let signature = match self.get_outbound_funding_created_signature(logger) {
+ Ok(res) => res,
+ Err(e) => {
+ log_error!(logger, "Got bad signatures: {:?}!", e);
+ self.context.channel_transaction_parameters.funding_outpoint = None;
+ return Err(e);
+ }
+ };
+
+ let temporary_channel_id = self.context.channel_id;
+
+ // Now that we're past error-generating stuff, update our local state:
+
+ self.context.channel_state = ChannelState::FundingCreated as u32;
+ self.context.channel_id = funding_txo.to_channel_id();
+ self.context.funding_transaction = Some(funding_transaction);
+
+ Ok(msgs::FundingCreated {
+ temporary_channel_id,
+ funding_txid: funding_txo.txid,
+ funding_output_index: funding_txo.index,
+ signature,
+ #[cfg(taproot)]
+ partial_signature_with_nonce: None,
+ #[cfg(taproot)]
+ next_local_nonce: None,
+ })
+ }
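+
+ // A rough ordering sketch; `funding_tx`, `funding_txo` and `logger` are assumed caller-side
+ // values, and the broadcast step lives in the caller, not in this Channel:
+ //
+ //     let funding_created = chan.get_outbound_funding_created(funding_tx, funding_txo, &logger)?;
+ //     // send `funding_created` to the peer, wait for their `funding_signed`,
+ //     // and only then broadcast `funding_tx`.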
+
+ /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
+ /// announceable and available for use (have exchanged ChannelReady messages in both
+ /// directions). Should be used for both broadcasted announcements and in response to an
+ /// AnnouncementSignatures message from the remote peer.
+ ///
+ /// Will only fail if we're not in a state where channel_announcement may be sent (including
+ /// closing).
+ ///
+ /// This will only return ChannelError::Ignore upon failure.
+ fn get_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
+ ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ if !self.context.config.announced_channel {
+ return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
+ }
+ if !self.context.is_usable() {
+ return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
+ }
+
+ let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+ .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
+ let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
+ let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
+
+ let msg = msgs::UnsignedChannelAnnouncement {
+ features: channelmanager::provided_channel_features(&user_config),
+ chain_hash,
+ short_channel_id: self.context.get_short_channel_id().unwrap(),
+ node_id_1: if were_node_one { node_id } else { counterparty_node_id },
+ node_id_2: if were_node_one { counterparty_node_id } else { node_id },
+ bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
+ bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
+ excess_data: Vec::new(),
+ };
+
+ Ok(msg)
+ }
+
+ fn get_announcement_sigs<NS: Deref, L: Deref>(
+ &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
+ best_block_height: u32, logger: &L
+ ) -> Option<msgs::AnnouncementSignatures>
+ where
+ NS::Target: NodeSigner,
+ L::Target: Logger
+ {
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return None;
+ }
+
+ if !self.context.is_usable() {
+ return None;
+ }
+
+ if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
+ log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
+ return None;
+ }
+
+ if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
+ return None;
+ }
+
+ log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
+ let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
+ Ok(a) => a,
+ Err(e) => {
+ log_trace!(logger, "{:?}", e);
+ return None;
+ }
+ };
+ let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
+ Err(_) => {
+ log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
+ return None;
+ },
+ Ok(v) => v
+ };
+ let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
+ Err(_) => {
+ log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
+ return None;
+ },
+ Ok(v) => v
+ };
+ self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
+
+ Some(msgs::AnnouncementSignatures {
+ channel_id: self.context.channel_id(),
+ short_channel_id: self.context.get_short_channel_id().unwrap(),
+ node_signature: our_node_sig,
+ bitcoin_signature: our_bitcoin_sig,
+ })
+ }
+
+ /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
+ /// available.
+ fn sign_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
+ ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
+ let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
+ .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
+ let were_node_one = announcement.node_id_1 == our_node_key;
+
+ let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
+ .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
+ let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
+ Ok(msgs::ChannelAnnouncement {
+ node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
+ node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
+ bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
+ bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
+ contents: announcement,
+ })
+ } else {
+ Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
+ }
+ }
+
+ /// Processes an incoming announcement_signatures message, providing a fully-signed
+ /// channel_announcement message which we can broadcast and storing our counterparty's
+ /// signatures for later reconstruction/rebroadcast of the channel_announcement.
+ pub fn announcement_signatures<NS: Deref>(
+ &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
+ msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
+ ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
+ let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
+
+ let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
+
+ if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
+ return Err(ChannelError::Close(format!(
+ "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
+ &announcement, self.context.get_counterparty_node_id())));
+ }
+ if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
+ return Err(ChannelError::Close(format!(
+ "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
+ &announcement, self.context.counterparty_funding_pubkey())));
+ }
+
+ self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return Err(ChannelError::Ignore(
+ "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
+ }
+
+ self.sign_channel_announcement(node_signer, announcement)
+ }
+
+ /// Gets a signed channel_announcement for this channel, if we previously received an
+ /// announcement_signatures from our counterparty.
+ pub fn get_signed_channel_announcement<NS: Deref>(
+ &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
+ ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
+ if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
+ return None;
+ }
+ let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
+ Ok(res) => res,
+ Err(_) => return None,
+ };
+ match self.sign_channel_announcement(node_signer, announcement) {
+ Ok(res) => Some(res),
+ Err(_) => None,
+ }
+ }
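+
+ // Putting the three announcement helpers together (a sketch of the intended flow, not new
+ // behavior): `get_announcement_sigs` produces our `announcement_signatures` once the funding
+ // transaction has six confirmations; `announcement_signatures` verifies and stores the
+ // counterparty's signatures and returns a broadcastable `channel_announcement`; and
+ // `get_signed_channel_announcement` rebuilds that announcement later for rebroadcast.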
+
+ /// May panic if called on a channel on which
+ /// self.remove_uncommitted_htlcs_and_mark_paused() was not called immediately beforehand.
+ pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
+ assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
+ assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
+ // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
+ // current to_remote balances. However, it no longer has any use, and thus is now simply
+ // set to a dummy (but valid, as required by the spec) public key.
+ // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
+ // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
+ // valid, and valid in fuzzing mode's arbitrary validity criteria:
+ let mut pk = [2; 33]; pk[1] = 0xff;
+ let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
+ let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+ let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
+ log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
+ remote_last_secret
+ } else {
+ log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
+ [0;32]
+ };
+ self.mark_awaiting_response();
+ msgs::ChannelReestablish {
+ channel_id: self.context.channel_id(),
+ // The protocol has two different commitment number concepts - the "commitment
+ // transaction number", which starts from 0 and counts up, and the "revocation key
+ // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
+ // commitment transaction numbers by the index which will be used to reveal the
+ // revocation key for that commitment transaction, which means we have to convert them
+ // to protocol-level commitment numbers here...
+
+ // next_local_commitment_number is the next commitment_signed number we expect to
+ // receive (indicating if they need to resend one that we missed).
+ next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
+ // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
+ // receive, however we track it by the next commitment number for a remote transaction
+ // (which is one further, as they always revoke the previous commitment transaction, not
+ // the one we send) so we have to decrement by 1. Note that if
+ // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
+ // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
+ // overflow here.
+ next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
+ your_last_per_commitment_secret: remote_last_secret,
+ my_current_per_commitment_point: dummy_pubkey,
+ // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
+ // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
+ // txid of that interactive transaction, else we MUST NOT set it.
+ next_funding_txid: None,
+ }
+ }
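+
+ // A worked example of the numbering conversion in `get_channel_reestablish`, assuming the
+ // usual post-funding state: with cur_holder_commitment_transaction_number at
+ // INITIAL_COMMITMENT_NUMBER - 1 (only the initial commitment exchanged),
+ // next_local_commitment_number comes out to 1, i.e. we next expect commitment_signed
+ // number 1 under the protocol-level numbering that starts at 0.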
+
+
+ // Send stuff to our remote peers:
+
+ /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
+ /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
+ /// commitment update.
+ ///
+ /// `Err`s will only be [`ChannelError::Ignore`].
+ pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+ onion_routing_packet: msgs::OnionPacket, logger: &L)
+ -> Result<(), ChannelError> where L::Target: Logger {
+ self
+ .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
+ .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
+ .map_err(|err| {
+ if let ChannelError::Ignore(_) = err { /* fine */ }
+ else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
+ err
+ })
+ }
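+
+ // A minimal usage sketch; `chan`, the HTLC parameters and `logger` are assumed caller-side
+ // values, and the arguments to `maybe_free_holding_cell_htlcs` are elided here:
+ //
+ //     chan.queue_add_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, &logger)?;
+ //     // later, once we can commit, free the holding cell to actually send the update:
+ //     chan.maybe_free_holding_cell_htlcs(...);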
+
+ /// Adds a pending outbound HTLC to this channel; note that you probably want
+ /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
+ ///
+ /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
+ /// the wire:
+ /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
+ /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
+ /// awaiting ACK.
+ /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
+ /// we may not yet have sent the previous commitment update messages and will need to
+ /// regenerate them.
+ ///
+ /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
+ /// on this [`Channel`] if `force_holding_cell` is false.
+ ///
+ /// `Err`s will only be [`ChannelError::Ignore`].
+ fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
+ onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
+ -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
+ if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
+ return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
+ }
+ let channel_total_msat = self.context.channel_value_satoshis * 1000;
+ if amount_msat > channel_total_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
+ }
- // Methods to get unprompted messages to send to the remote end (or where we already returned
- // something in the handler for the message that prompted this message):
+ if amount_msat == 0 {
+ return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
+ }
- pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
- if !self.context.is_outbound() {
- panic!("Tried to open a channel for an inbound channel?");
+ let available_balances = self.context.get_available_balances();
+ if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
+ available_balances.next_outbound_htlc_minimum_msat)));
}
- if self.context.channel_state != ChannelState::OurInitSent as u32 {
- panic!("Cannot generate an open_channel after we've moved forward");
+
+ if amount_msat > available_balances.next_outbound_htlc_limit_msat {
+ return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
+ available_balances.next_outbound_htlc_limit_msat)));
}
- if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Tried to send an open_channel for a channel that has already advanced");
+ if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
+ // Note that this should never really happen: if we're !is_live(), an incoming HTLC
+ // received for relay will be rejected, and we won't allow the user to send directly
+ // into a !is_live() channel. However, if we disconnected while the previous hop was
+ // doing the commitment dance, we may end up getting here after the forwarding delay.
+ // In any case, returning an IgnoreError will get ChannelManager to do the right
+ // thing and fail backwards now.
+ return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
}
- let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let keys = self.context.get_holder_pubkeys();
+ let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
+ log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
+ if force_holding_cell { "into holding cell" }
+ else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
+ else { "to peer" });
- msgs::OpenChannel {
- chain_hash,
- temporary_channel_id: self.context.channel_id,
- funding_satoshis: self.context.channel_value_satoshis,
- push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
- dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
- channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
- htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
- feerate_per_kw: self.context.feerate_per_kw as u32,
- to_self_delay: self.context.get_holder_selected_contest_delay(),
- max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
- funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint,
- payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint,
- htlc_basepoint: keys.htlc_basepoint,
- first_per_commitment_point,
- channel_flags: if self.context.config.announced_channel {1} else {0},
- shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
- Some(script) => script.clone().into_inner(),
- None => Builder::new().into_script(),
- }),
- channel_type: Some(self.context.channel_type.clone()),
+ if need_holding_cell {
+ force_holding_cell = true;
}
- }
- pub fn inbound_is_awaiting_accept(&self) -> bool {
- self.context.inbound_awaiting_accept
- }
+ // Now update local state:
+ if force_holding_cell {
+ self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
+ amount_msat,
+ payment_hash,
+ cltv_expiry,
+ source,
+ onion_routing_packet,
+ });
+ return Ok(None);
+ }
- /// Sets this channel to accepting 0conf, must be done before `get_accept_channel`
- pub fn set_0conf(&mut self) {
- assert!(self.context.inbound_awaiting_accept);
- self.context.minimum_depth = Some(0);
+ self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
+ htlc_id: self.context.next_holder_htlc_id,
+ amount_msat,
+ payment_hash: payment_hash.clone(),
+ cltv_expiry,
+ state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
+ source,
+ });
+
+ let res = msgs::UpdateAddHTLC {
+ channel_id: self.context.channel_id,
+ htlc_id: self.context.next_holder_htlc_id,
+ amount_msat,
+ payment_hash,
+ cltv_expiry,
+ onion_routing_packet,
+ };
+ self.context.next_holder_htlc_id += 1;
+
+ Ok(Some(res))
}
- /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
- /// should be sent back to the counterparty node.
- ///
- /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- pub fn accept_inbound_channel(&mut self, user_id: u128) -> msgs::AcceptChannel {
- if self.context.is_outbound() {
- panic!("Tried to send accept_channel for an outbound channel?");
- }
- if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
- panic!("Tried to send accept_channel after channel had moved forward");
+ fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
+ log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
+ // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
+ // fail to generate this commitment, we are still at least at a position where upgrading
+ // their status is acceptable.
+ for htlc in self.context.pending_inbound_htlcs.iter_mut() {
+ let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
+ Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
+ } else { None };
+ if let Some(state) = new_state {
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ htlc.state = state;
+ }
}
- if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Tried to send an accept_channel for a channel that has already advanced");
+ for htlc in self.context.pending_outbound_htlcs.iter_mut() {
+ if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
+ log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
+ // Grab the preimage, if it exists, instead of cloning
+ let mut reason = OutboundHTLCOutcome::Success(None);
+ mem::swap(outcome, &mut reason);
+ htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
+ }
}
- if !self.context.inbound_awaiting_accept {
- panic!("The inbound channel has already been accepted");
+ if let Some((feerate, update_state)) = self.context.pending_update_fee {
+ if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
+ debug_assert!(!self.context.is_outbound());
+ log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
+ self.context.feerate_per_kw = feerate;
+ self.context.pending_update_fee = None;
+ }
}
+ self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
- self.context.user_id = user_id;
- self.context.inbound_awaiting_accept = false;
-
- self.generate_accept_channel_message()
- }
-
- /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
- /// inbound channel. If the intention is to accept an inbound channel, use
- /// [`Channel::accept_inbound_channel`] instead.
- ///
- /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
- let first_per_commitment_point = self.context.holder_signer.get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
- let keys = self.context.get_holder_pubkeys();
+ let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
+ let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+ htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
- msgs::AcceptChannel {
- temporary_channel_id: self.context.channel_id,
- dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
- channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
- htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
- minimum_depth: self.context.minimum_depth.unwrap(),
- to_self_delay: self.context.get_holder_selected_contest_delay(),
- max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
- funding_pubkey: keys.funding_pubkey,
- revocation_basepoint: keys.revocation_basepoint,
- payment_point: keys.payment_point,
- delayed_payment_basepoint: keys.delayed_payment_basepoint,
- htlc_basepoint: keys.htlc_basepoint,
- first_per_commitment_point,
- shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
- Some(script) => script.clone().into_inner(),
- None => Builder::new().into_script(),
- }),
- channel_type: Some(self.context.channel_type.clone()),
- #[cfg(taproot)]
- next_local_nonce: None,
+ if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
+ self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
}
- }
- /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
- /// inbound channel without accepting it.
- ///
- /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
- #[cfg(test)]
- pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
- self.generate_accept_channel_message()
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
+ commitment_txid: counterparty_commitment_txid,
+ htlc_outputs: htlcs.clone(),
+ commitment_number: self.context.cur_counterparty_commitment_transaction_number,
+ their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap()
+ }]
+ };
+ self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+ monitor_update
}
- /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created)
- fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
+ fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
let counterparty_keys = self.context.build_remote_transaction_keys();
- let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
- Ok(self.context.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
- }
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+ let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
- /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
- /// a funding_created message for the remote peer.
- /// Panics if called at some time other than immediately after initial handshake, if called twice,
- /// or if called on an inbound channel.
- /// Note that channel_id changes during this call!
- /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
- /// If an Err is returned, it is a ChannelError::Close.
- pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
- if !self.context.is_outbound() {
- panic!("Tried to create outbound funding_created message on an inbound channel!");
- }
- if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
- panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
- }
- if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
- self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
- self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
- panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
+ #[cfg(any(test, fuzzing))]
+ {
+ if !self.context.is_outbound() {
+ let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
+ *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ if let Some(info) = projected_commit_tx_info {
+ let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
+ if info.total_pending_htlcs == total_pending_htlcs
+ && info.next_holder_htlc_id == self.context.next_holder_htlc_id
+ && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
+ && info.feerate == self.context.feerate_per_kw {
+ let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors());
+ assert_eq!(actual_fee, info.fee);
+ }
+ }
+ }
}
- self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
- self.context.holder_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
+ (counterparty_commitment_txid, commitment_stats.htlcs_included)
+ }
+
+ /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
+ /// generation when we shouldn't change HTLC/channel state.
+ fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+ // Get the fee tests from `build_commitment_no_state_update`
+ #[cfg(any(test, fuzzing))]
+ self.build_commitment_no_state_update(logger);
+
+ let counterparty_keys = self.context.build_remote_transaction_keys();
+ let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+ let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+ let (signature, htlc_signatures);
- let signature = match self.get_outbound_funding_created_signature(logger) {
- Ok(res) => res,
- Err(e) => {
- log_error!(logger, "Got bad signatures: {:?}!", e);
- self.context.channel_transaction_parameters.funding_outpoint = None;
- return Err(e);
+ {
+ let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
+ for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
+ htlcs.push(htlc);
}
- };
- let temporary_channel_id = self.context.channel_id;
+ let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
+ .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
+ signature = res.0;
+ htlc_signatures = res.1;
- // Now that we're past error-generating stuff, update our local state:
+ log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
+ encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
+ &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
+ log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
- self.context.channel_state = ChannelState::FundingCreated as u32;
- self.context.channel_id = funding_txo.to_channel_id();
- self.context.funding_transaction = Some(funding_transaction);
+ for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
+ log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
+ encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
+ encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
+ log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
+ log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+ }
+ }
- Ok(msgs::FundingCreated {
- temporary_channel_id,
- funding_txid: funding_txo.txid,
- funding_output_index: funding_txo.index,
+ Ok((msgs::CommitmentSigned {
+ channel_id: self.context.channel_id,
signature,
+ htlc_signatures,
#[cfg(taproot)]
partial_signature_with_nonce: None,
- #[cfg(taproot)]
- next_local_nonce: None,
- })
+ }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
}
- /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
- /// announceable and available for use (have exchanged ChannelReady messages in both
- /// directions). Should be used for both broadcasted announcements and in response to an
- /// AnnouncementSignatures message from the remote peer.
- ///
- /// Will only fail if we're not in a state where channel_announcement may be sent (including
- /// closing).
+ /// Adds a pending outbound HTLC to this channel, builds a new remote commitment
+ /// transaction, and generates the corresponding [`ChannelMonitorUpdate`] in one go.
///
- /// This will only return ChannelError::Ignore upon failure.
- fn get_channel_announcement<NS: Deref>(
- &self, node_signer: &NS, chain_hash: BlockHash, user_config: &UserConfig,
- ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
- if !self.context.config.announced_channel {
- return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
- }
- if !self.context.is_usable() {
- return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
+ /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update; see the docs on
+ /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
+ pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+ let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+ if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
+ match send_res? {
+ Some(_) => {
+ let monitor_update = self.build_commitment_no_status_check(logger);
+ self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+ Ok(self.push_ret_blockable_mon_update(monitor_update))
+ },
+ None => Ok(None)
}
+ }
- let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
- .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
- let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
- let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
-
- let msg = msgs::UnsignedChannelAnnouncement {
- features: channelmanager::provided_channel_features(&user_config),
- chain_hash,
- short_channel_id: self.context.get_short_channel_id().unwrap(),
- node_id_1: if were_node_one { node_id } else { counterparty_node_id },
- node_id_2: if were_node_one { counterparty_node_id } else { node_id },
- bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
- bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
- excess_data: Vec::new(),
- };
+ pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
+ if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
+ return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
+ }
+ self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
+ fee_base_msat: msg.contents.fee_base_msat,
+ fee_proportional_millionths: msg.contents.fee_proportional_millionths,
+ cltv_expiry_delta: msg.contents.cltv_expiry_delta
+ });
- Ok(msg)
+ Ok(())
}
- fn get_announcement_sigs<NS: Deref, L: Deref>(
- &mut self, node_signer: &NS, genesis_block_hash: BlockHash, user_config: &UserConfig,
- best_block_height: u32, logger: &L
- ) -> Option<msgs::AnnouncementSignatures>
- where
- NS::Target: NodeSigner,
- L::Target: Logger
- {
- if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
- return None;
+ /// Begins the shutdown process, getting a message for the remote peer and returning all
+ /// holding cell HTLCs for payment failure.
+ ///
+ /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
+ /// [`ChannelMonitorUpdate`] will be returned.
+ pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
+ target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
+ -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+ where SP::Target: SignerProvider {
+ for htlc in self.context.pending_outbound_htlcs.iter() {
+ if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
+ return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
+ }
}
-
- if !self.context.is_usable() {
- return None;
+ if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
+ if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
+ return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
+ }
+ else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
+ return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
+ }
}
-
- if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
- log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
- return None;
+ if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+ return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+ }
+ assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
+ if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
+ return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
}
- if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
- return None;
+ // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
+ // script is set; we just force-close and call it a day.
+ let mut chan_closed = false;
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ chan_closed = true;
}
- log_trace!(logger, "Creating an announcement_signatures message for channel {}", log_bytes!(self.context.channel_id()));
- let announcement = match self.get_channel_announcement(node_signer, genesis_block_hash, user_config) {
- Ok(a) => a,
- Err(e) => {
- log_trace!(logger, "{:?}", e);
- return None;
- }
- };
- let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
- Err(_) => {
- log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
- return None;
- },
- Ok(v) => v
- };
- let our_bitcoin_sig = match self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
- Err(_) => {
- log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
- return None;
+ let update_shutdown_script = match self.context.shutdown_scriptpubkey {
+ Some(_) => false,
+ None if !chan_closed => {
+ // use override shutdown script if provided
+ let shutdown_scriptpubkey = match override_shutdown_script {
+ Some(script) => script,
+ None => {
+ // otherwise, use the shutdown scriptpubkey provided by the signer
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => scriptpubkey,
+ Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
+ },
+ };
+ if !shutdown_scriptpubkey.is_compatible(their_features) {
+ return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+ }
+ self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
+ true
},
- Ok(v) => v
+ None => false,
};
- self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
-
- Some(msgs::AnnouncementSignatures {
- channel_id: self.context.channel_id(),
- short_channel_id: self.context.get_short_channel_id().unwrap(),
- node_signature: our_node_sig,
- bitcoin_signature: our_bitcoin_sig,
- })
- }
-
- /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
- /// available.
- fn sign_channel_announcement<NS: Deref>(
- &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
- ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
- if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
- let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
- .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
- let were_node_one = announcement.node_id_1 == our_node_key;
- let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
- .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
- let our_bitcoin_sig = self.context.holder_signer.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
- Ok(msgs::ChannelAnnouncement {
- node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
- node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
- bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
- bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
- contents: announcement,
- })
+ // From here on out, we may not fail!
+ self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
+ if self.context.channel_state < ChannelState::FundingSent as u32 {
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
} else {
- Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
+ self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
}
- }
+ self.context.update_time_counter += 1;
+
+ let monitor_update = if update_shutdown_script {
+ self.context.latest_monitor_update_id += 1;
+ let monitor_update = ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ }],
+ };
+ self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+ if self.push_blockable_mon_update(monitor_update) {
+ self.context.pending_monitor_updates.last().map(|upd| &upd.update)
+ } else { None }
+ } else { None };
+ let shutdown = msgs::Shutdown {
+ channel_id: self.context.channel_id,
+ scriptpubkey: self.get_closing_scriptpubkey(),
+ };
- /// Processes an incoming announcement_signatures message, providing a fully-signed
- /// channel_announcement message which we can broadcast and storing our counterparty's
- /// signatures for later reconstruction/rebroadcast of the channel_announcement.
- pub fn announcement_signatures<NS: Deref>(
- &mut self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32,
- msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
- ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
- let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
+ // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
+ // our shutdown until we've committed all of the pending changes.
+ self.context.holding_cell_update_fee = None;
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
+ self.context.holding_cell_htlc_updates.retain(|htlc_update| {
+ match htlc_update {
+ &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
+ dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
+ false
+ },
+ _ => true
+ }
+ });
- let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
+ debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
+ "we can't both complete shutdown and return a monitor update");
- if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
- return Err(ChannelError::Close(format!(
- "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
- &announcement, self.context.get_counterparty_node_id())));
- }
- if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
- return Err(ChannelError::Close(format!(
- "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
- &announcement, self.context.counterparty_funding_pubkey())));
- }
+ Ok((shutdown, monitor_update, dropped_outbound_htlcs))
+ }
- self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
- if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
- return Err(ChannelError::Ignore(
- "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
+ /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
+ /// shutdown of this channel - no more calls into this Channel may be made afterwards except
+ /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
+ /// Also returns the list of payment_hashes for channels which we can safely fail backwards
+ /// immediately (others we will have to allow to time out).
+ pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
+ // Note that we MUST only generate a monitor update that indicates force-closure - we're
+ // called during initialization prior to the chain_monitor in the encompassing ChannelManager
+ // being fully configured in some cases. Thus, it's likely any monitor events we generate will
+ // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
+ assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32);
+
+ // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
+ // return them to fail the payment.
+ let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
+ let counterparty_node_id = self.context.get_counterparty_node_id();
+ for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
+ match htlc_update {
+ HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
+ dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id));
+ },
+ _ => {}
+ }
}
+ let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
+ // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
+ // returning a channel monitor update here would imply a channel monitor update before
+ // we even registered the channel monitor to begin with, which is invalid.
+ // Thus, if we aren't actually at a point where we could conceivably broadcast the
+ // funding transaction, don't return a funding txo (which prevents providing the
+ // monitor update to the user, even if we return one).
+ // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
+ if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
+ self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
+ Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
+ update_id: self.context.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+ }))
+ } else { None }
+ } else { None };
- self.sign_channel_announcement(node_signer, announcement)
+ self.context.channel_state = ChannelState::ShutdownComplete as u32;
+ self.context.update_time_counter += 1;
+ (monitor_update, dropped_outbound_htlcs)
}
- /// Gets a signed channel_announcement for this channel, if we previously received an
- /// announcement_signatures from our counterparty.
- pub fn get_signed_channel_announcement<NS: Deref>(
- &self, node_signer: &NS, chain_hash: BlockHash, best_block_height: u32, user_config: &UserConfig
- ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
- if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
- return None;
- }
- let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
- Ok(res) => res,
- Err(_) => return None,
- };
- match self.sign_channel_announcement(node_signer, announcement) {
- Ok(res) => Some(res),
- Err(_) => None,
- }
+ pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
+ self.context.holding_cell_htlc_updates.iter()
+ .flat_map(|htlc_update| {
+ match htlc_update {
+ HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
+ => Some((source, payment_hash)),
+ _ => None,
+ }
+ })
+ .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
}
+}
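// A minimal standalone sketch of the iterator pattern in `inflight_htlc_sources`
// above, over simplified stand-in types: holding-cell `AddHTLC` updates are
// filtered via `flat_map`, then chained with the already-pending outbound HTLCs.
enum SketchHoldingCellUpdate { AddHtlc { payment_hash: [u8; 32] }, Other }

fn sketch_inflight_hashes<'a>(
	holding_cell: &'a [SketchHoldingCellUpdate], pending_outbound: &'a [[u8; 32]],
) -> impl Iterator<Item = &'a [u8; 32]> {
	holding_cell.iter()
		.flat_map(|upd| match upd {
			SketchHoldingCellUpdate::AddHtlc { payment_hash } => Some(payment_hash),
			_ => None,
		})
		.chain(pending_outbound.iter())
}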
- /// May panic if called on a channel that wasn't immediately-previously
- /// self.remove_uncommitted_htlcs_and_mark_paused()'d
- pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
- assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
- assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
- // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
- // current to_remote balances. However, it no longer has any use, and thus is now simply
- // set to a dummy (but valid, as required by the spec) public key.
- // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
- // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
- // valid, and valid in fuzzing mode's arbitrary validity criteria:
- let mut pk = [2; 33]; pk[1] = 0xff;
- let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
- let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
- let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
- log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.context.channel_id()));
- remote_last_secret
- } else {
- log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.context.channel_id()));
- [0;32]
- };
- self.mark_awaiting_response();
- msgs::ChannelReestablish {
- channel_id: self.context.channel_id(),
- // The protocol has two different commitment number concepts - the "commitment
- // transaction number", which starts from 0 and counts up, and the "revocation key
- // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
- // commitment transaction numbers by the index which will be used to reveal the
- // revocation key for that commitment transaction, which means we have to convert them
- // to protocol-level commitment numbers here...
+/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
+pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
+ pub context: ChannelContext<Signer>,
+}
- // next_local_commitment_number is the next commitment_signed number we expect to
- // receive (indicating if they need to resend one that we missed).
- next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
- // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
- // receive, however we track it by the next commitment number for a remote transaction
- // (which is one further, as they always revoke previous commitment transaction, not
- // the one we send) so we have to decrement by 1. Note that if
- // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
- // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
- // overflow here.
- next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
- your_last_per_commitment_secret: remote_last_secret,
- my_current_per_commitment_point: dummy_pubkey,
- // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction
- // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
- // txid of that interactive transaction, else we MUST NOT set it.
- next_funding_txid: None,
+impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
+ fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
+ // The default channel type (ie the first one we try) depends on whether the channel is
+ // public - if it is, we just go with `only_static_remotekey` as it's the only option
+ // available. If it's private, we first try `scid_privacy` as it provides better privacy
+ // with no other changes, and fall back to `only_static_remotekey`.
+ let mut ret = ChannelTypeFeatures::only_static_remote_key();
+ if !config.channel_handshake_config.announced_channel &&
+ config.channel_handshake_config.negotiate_scid_privacy &&
+ their_features.supports_scid_privacy() {
+ ret.set_scid_privacy_required();
}
- }
-
- // Send stuff to our remote peers:
+ // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
+ // set it now. If they don't understand it, we'll fall back to our default of
+ // `only_static_remotekey`.
+ #[cfg(anchors)]
+ { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
+ if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
+ their_features.supports_anchors_zero_fee_htlc_tx() {
+ ret.set_anchors_zero_fee_htlc_tx_required();
+ }
+ }
- /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
- /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
- /// commitment update.
- ///
- /// `Err`s will only be [`ChannelError::Ignore`].
- pub fn queue_add_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
- onion_routing_packet: msgs::OnionPacket, logger: &L)
- -> Result<(), ChannelError> where L::Target: Logger {
- self
- .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true, logger)
- .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
- .map_err(|err| {
- if let ChannelError::Ignore(_) = err { /* fine */ }
- else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
- err
- })
+ ret
}
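	// A minimal standalone sketch of the selection above, using plain booleans for
	// the config and peer-feature inputs (the parameter names are illustrative):
	// the returned pair stands for (scid_privacy required, anchors_zero_fee_htlc_tx
	// required); everything else stays at the `only_static_remotekey` baseline.
	fn sketch_initial_channel_type(
		announced: bool, negotiate_scid_privacy: bool, peer_supports_scid_privacy: bool,
		negotiate_anchors: bool, peer_supports_anchors: bool,
	) -> (bool, bool) {
		let scid_privacy = !announced && negotiate_scid_privacy && peer_supports_scid_privacy;
		let anchors = negotiate_anchors && peer_supports_anchors;
		(scid_privacy, anchors)
	}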
- /// Adds a pending outbound HTLC to this channel, note that you probably want
- /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
- ///
- /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
- /// the wire:
- /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
- /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
- /// awaiting ACK.
- /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
- /// we may not yet have sent the previous commitment update messages and will need to
- /// regenerate them.
- ///
- /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
- /// on this [`Channel`] if `force_holding_cell` is false.
- ///
- /// `Err`s will only be [`ChannelError::Ignore`].
- fn send_htlc<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
- onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, logger: &L)
- -> Result<Option<msgs::UpdateAddHTLC>, ChannelError> where L::Target: Logger {
- if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
- return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
+ pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
+ fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
+ channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
+ outbound_scid_alias: u64
+ ) -> Result<Channel<Signer>, APIError>
+ where ES::Target: EntropySource,
+ SP::Target: SignerProvider<Signer = Signer>,
+ F::Target: FeeEstimator,
+ {
+ let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
+ let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
+
+ if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
}
- let channel_total_msat = self.context.channel_value_satoshis * 1000;
- if amount_msat > channel_total_msat {
- return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
+ if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
+ }
+ let channel_value_msat = channel_value_satoshis * 1000;
+ if push_msat > channel_value_msat {
+ return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
+ }
+ if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
+ return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
+ }
+ let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
+ if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ // Protocol level safety check in place, although it should never happen because
+ // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
+ return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
}
- if amount_msat == 0 {
- return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
- }
+ let channel_type = Self::get_initial_channel_type(&config, their_features);
+ debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
- let available_balances = self.context.get_available_balances();
- if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
- return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
- available_balances.next_outbound_htlc_minimum_msat)));
- }
+ let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
- if amount_msat > available_balances.next_outbound_htlc_limit_msat {
- return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
- available_balances.next_outbound_htlc_limit_msat)));
+ let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
+ let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
+ if value_to_self_msat < commitment_tx_fee {
+ return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
}
- if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
- // Note that this should never really happen, if we're !is_live() on receipt of an
- // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
- // the user to send directly into a !is_live() channel. However, if we
- // disconnected during the time the previous hop was doing the commitment dance we may
- // end up getting here after the forwarding delay. In any case, returning an
- // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
- return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
- }
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
- let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
- log_debug!(logger, "Pushing new outbound HTLC for {} msat {}", amount_msat,
- if force_holding_cell { "into holding cell" }
- else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
- else { "to peer" });
+ let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
+ match signer_provider.get_shutdown_scriptpubkey() {
+ Ok(scriptpubkey) => Some(scriptpubkey),
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ }
+ } else { None };
- if need_holding_cell {
- force_holding_cell = true;
+ if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
+ if !shutdown_scriptpubkey.is_compatible(&their_features) {
+ return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+ }
}
- // Now update local state:
- if force_holding_cell {
- self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
- amount_msat,
- payment_hash,
- cltv_expiry,
- source,
- onion_routing_packet,
- });
- return Ok(None);
- }
+ let destination_script = match signer_provider.get_destination_script() {
+ Ok(script) => script,
+ Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+ };
- self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
- htlc_id: self.context.next_holder_htlc_id,
- amount_msat,
- payment_hash: payment_hash.clone(),
- cltv_expiry,
- state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
- source,
- });
+ let temporary_channel_id = entropy_source.get_secure_random_bytes();
- let res = msgs::UpdateAddHTLC {
- channel_id: self.context.channel_id,
- htlc_id: self.context.next_holder_htlc_id,
- amount_msat,
- payment_hash,
- cltv_expiry,
- onion_routing_packet,
- };
- self.context.next_holder_htlc_id += 1;
+ Ok(Channel {
+ context: ChannelContext {
+ user_id,
- Ok(Some(res))
- }
+ config: LegacyChannelConfig {
+ options: config.channel_config.clone(),
+ announced_channel: config.channel_handshake_config.announced_channel,
+ commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
+ },
- fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
- log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
- // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
- // fail to generate this, we still are at least at a position where upgrading their status
- // is acceptable.
- for htlc in self.context.pending_inbound_htlcs.iter_mut() {
- let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
- Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
- } else { None };
- if let Some(state) = new_state {
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
- htlc.state = state;
- }
- }
- for htlc in self.context.pending_outbound_htlcs.iter_mut() {
- if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
- log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0));
- // Grab the preimage, if it exists, instead of cloning
- let mut reason = OutboundHTLCOutcome::Success(None);
- mem::swap(outcome, &mut reason);
- htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
- }
- }
- if let Some((feerate, update_state)) = self.context.pending_update_fee {
- if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
- debug_assert!(!self.context.is_outbound());
- log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
- self.context.feerate_per_kw = feerate;
- self.context.pending_update_fee = None;
- }
- }
- self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
+ prev_config: None,
- let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
- let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
- htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
+ inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
- if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
- self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
- }
+ channel_id: temporary_channel_id,
+ temporary_channel_id: Some(temporary_channel_id),
+ channel_state: ChannelState::OurInitSent as u32,
+ announcement_sigs_state: AnnouncementSigsState::NotSent,
+ secp_ctx,
+ channel_value_satoshis,
- self.context.latest_monitor_update_id += 1;
- let monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
- commitment_txid: counterparty_commitment_txid,
- htlc_outputs: htlcs.clone(),
- commitment_number: self.context.cur_counterparty_commitment_transaction_number,
- their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap()
- }]
- };
- self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
- monitor_update
- }
+ latest_monitor_update_id: 0,
- fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
- let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+ holder_signer,
+ shutdown_scriptpubkey,
+ destination_script,
- #[cfg(any(test, fuzzing))]
- {
- if !self.context.is_outbound() {
- let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
- *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
- if let Some(info) = projected_commit_tx_info {
- let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
- if info.total_pending_htlcs == total_pending_htlcs
- && info.next_holder_htlc_id == self.context.next_holder_htlc_id
- && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
- && info.feerate == self.context.feerate_per_kw {
- let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.opt_anchors());
- assert_eq!(actual_fee, info.fee);
- }
- }
- }
- }
+ cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
+ value_to_self_msat,
- (counterparty_commitment_txid, commitment_stats.htlcs_included)
- }
+ pending_inbound_htlcs: Vec::new(),
+ pending_outbound_htlcs: Vec::new(),
+ holding_cell_htlc_updates: Vec::new(),
+ pending_update_fee: None,
+ holding_cell_update_fee: None,
+ next_holder_htlc_id: 0,
+ next_counterparty_htlc_id: 0,
+ update_time_counter: 1,
- /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
- /// generation when we shouldn't change HTLC/channel state.
- fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
- // Get the fee tests from `build_commitment_no_state_update`
- #[cfg(any(test, fuzzing))]
- self.build_commitment_no_state_update(logger);
+ resend_order: RAACommitmentOrder::CommitmentFirst,
- let counterparty_keys = self.context.build_remote_transaction_keys();
- let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
- let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
- let (signature, htlc_signatures);
+ monitor_pending_channel_ready: false,
+ monitor_pending_revoke_and_ack: false,
+ monitor_pending_commitment_signed: false,
+ monitor_pending_forwards: Vec::new(),
+ monitor_pending_failures: Vec::new(),
+ monitor_pending_finalized_fulfills: Vec::new(),
- {
- let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
- for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
- htlcs.push(htlc);
- }
+ #[cfg(debug_assertions)]
+ holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+ #[cfg(debug_assertions)]
+ counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
- let res = self.context.holder_signer.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
- .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
- signature = res.0;
- htlc_signatures = res.1;
+ last_sent_closing_fee: None,
+ pending_counterparty_closing_signed: None,
+ closing_fee_limits: None,
+ target_closing_feerate_sats_per_kw: None,
- log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
- encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
- &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
- log_bytes!(signature.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
+ inbound_awaiting_accept: false,
- for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
- log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
- encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, self.context.opt_anchors(), false, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
- encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, self.context.opt_anchors(), &counterparty_keys)),
- log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
- log_bytes!(htlc_sig.serialize_compact()[..]), log_bytes!(self.context.channel_id()));
- }
- }
+ funding_tx_confirmed_in: None,
+ funding_tx_confirmation_height: 0,
+ short_channel_id: None,
+ channel_creation_height: current_chain_height,
- Ok((msgs::CommitmentSigned {
- channel_id: self.context.channel_id,
- signature,
- htlc_signatures,
- #[cfg(taproot)]
- partial_signature_with_nonce: None,
- }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
- }
+ feerate_per_kw: feerate,
+ counterparty_dust_limit_satoshis: 0,
+ holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
+ counterparty_max_htlc_value_in_flight_msat: 0,
+ holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
+ counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
+ holder_selected_channel_reserve_satoshis,
+ counterparty_htlc_minimum_msat: 0,
+ holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
+ counterparty_max_accepted_htlcs: 0,
+ holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
+ minimum_depth: None, // Filled in in accept_channel
- /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
- /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
- ///
- /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
- /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
- pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
- let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
- if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
- match send_res? {
- Some(_) => {
- let monitor_update = self.build_commitment_no_status_check(logger);
- self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
- Ok(self.push_ret_blockable_mon_update(monitor_update))
- },
- None => Ok(None)
- }
- }
+ counterparty_forwarding_info: None,
- pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<(), ChannelError> {
- if msg.contents.htlc_minimum_msat >= self.context.channel_value_satoshis * 1000 {
- return Err(ChannelError::Close("Minimum htlc value is greater than channel value".to_string()));
- }
- self.context.counterparty_forwarding_info = Some(CounterpartyForwardingInfo {
- fee_base_msat: msg.contents.fee_base_msat,
- fee_proportional_millionths: msg.contents.fee_proportional_millionths,
- cltv_expiry_delta: msg.contents.cltv_expiry_delta
- });
+ channel_transaction_parameters: ChannelTransactionParameters {
+ holder_pubkeys: pubkeys,
+ holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
+ is_outbound_from_holder: true,
+ counterparty_parameters: None,
+ funding_outpoint: None,
+ opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None },
+ opt_non_zero_fee_anchors: None
+ },
+ funding_transaction: None,
- Ok(())
- }
+ counterparty_cur_commitment_point: None,
+ counterparty_prev_commitment_point: None,
+ counterparty_node_id,
- /// Begins the shutdown process, getting a message for the remote peer and returning all
- /// holding cell HTLCs for payment failure.
- ///
- /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
- /// [`ChannelMonitorUpdate`] will be returned).
- pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
- target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
- -> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
- where SP::Target: SignerProvider {
- for htlc in self.context.pending_outbound_htlcs.iter() {
- if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
- return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
- }
- }
- if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
- if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
- return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
- }
- else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
- return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
- }
- }
- if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
- return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
- }
- assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
- if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
- return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
- }
+ counterparty_shutdown_scriptpubkey: None,
- // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
- // script is set, we just force-close and call it a day.
- let mut chan_closed = false;
- if self.context.channel_state < ChannelState::FundingSent as u32 {
- chan_closed = true;
- }
+ commitment_secrets: CounterpartyCommitmentSecrets::new(),
- let update_shutdown_script = match self.context.shutdown_scriptpubkey {
- Some(_) => false,
- None if !chan_closed => {
- // use override shutdown script if provided
- let shutdown_scriptpubkey = match override_shutdown_script {
- Some(script) => script,
- None => {
- // otherwise, use the shutdown scriptpubkey provided by the signer
- match signer_provider.get_shutdown_scriptpubkey() {
- Ok(scriptpubkey) => scriptpubkey,
- Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
- }
- },
- };
- if !shutdown_scriptpubkey.is_compatible(their_features) {
- return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
- }
- self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
- true
- },
- None => false,
- };
+ channel_update_status: ChannelUpdateStatus::Enabled,
+ closing_signed_in_flight: false,
- // From here on out, we may not fail!
- self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
- if self.context.channel_state < ChannelState::FundingSent as u32 {
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- } else {
- self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
- }
- self.context.update_time_counter += 1;
+ announcement_sigs: None,
- let monitor_update = if update_shutdown_script {
- self.context.latest_monitor_update_id += 1;
- let monitor_update = ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
- scriptpubkey: self.get_closing_scriptpubkey(),
- }],
- };
- self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
- if self.push_blockable_mon_update(monitor_update) {
- self.context.pending_monitor_updates.last().map(|upd| &upd.update)
- } else { None }
- } else { None };
- let shutdown = msgs::Shutdown {
- channel_id: self.context.channel_id,
- scriptpubkey: self.get_closing_scriptpubkey(),
- };
+ #[cfg(any(test, fuzzing))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, fuzzing))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
- // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
- // our shutdown until we've committed all of the pending changes.
- self.context.holding_cell_update_fee = None;
- let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
- self.context.holding_cell_htlc_updates.retain(|htlc_update| {
- match htlc_update {
- &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
- dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
- false
- },
- _ => true
- }
- });
+ workaround_lnd_bug_4006: None,
+ sent_message_awaiting_response: None,
- debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
- "we can't both complete shutdown and return a monitor update");
+ latest_inbound_scid_alias: None,
+ outbound_scid_alias,
- Ok((shutdown, monitor_update, dropped_outbound_htlcs))
- }
+ channel_pending_event_emitted: false,
+ channel_ready_event_emitted: false,
- /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
- /// shutdown of this channel - no more calls into this Channel may be made afterwards except
- /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
- /// Also returns the list of payment_hashes for channels which we can safely fail backwards
- /// immediately (others we will have to allow to time out).
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
- // Note that we MUST only generate a monitor update that indicates force-closure - we're
- // called during initialization prior to the chain_monitor in the encompassing ChannelManager
- // being fully configured in some cases. Thus, its likely any monitor events we generate will
- // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
- assert!(self.context.channel_state != ChannelState::ShutdownComplete as u32);
+ #[cfg(any(test, fuzzing))]
+ historical_inbound_htlc_fulfills: HashSet::new(),
- // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
- // return them to fail the payment.
- let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
- let counterparty_node_id = self.context.get_counterparty_node_id();
- for htlc_update in self.context.holding_cell_htlc_updates.drain(..) {
- match htlc_update {
- HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
- dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.context.channel_id));
- },
- _ => {}
- }
- }
- let monitor_update = if let Some(funding_txo) = self.context.get_funding_txo() {
- // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
- // returning a channel monitor update here would imply a channel monitor update before
- // we even registered the channel monitor to begin with, which is invalid.
- // Thus, if we aren't actually at a point where we could conceivably broadcast the
- // funding transaction, don't return a funding txo (which prevents providing the
- // monitor update to the user, even if we return one).
- // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
- if self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
- self.context.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
- Some((self.context.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
- update_id: self.context.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
- }))
- } else { None }
- } else { None };
+ channel_type,
+ channel_keys_id,
- self.context.channel_state = ChannelState::ShutdownComplete as u32;
- self.context.update_time_counter += 1;
- (monitor_update, dropped_outbound_htlcs)
+ pending_monitor_updates: Vec::new(),
+ }
+ })
}
+}
- pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
- self.context.holding_cell_htlc_updates.iter()
- .flat_map(|htlc_update| {
- match htlc_update {
- HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
- => Some((source, payment_hash)),
- _ => None,
- }
- })
- .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
- }
+/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
+pub(super) struct InboundV1Channel<Signer: ChannelSigner> {
+ pub context: ChannelContext<Signer>,
}
-/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
-pub(super) struct OutboundV1Channel<Signer: ChannelSigner> {
- pub context: ChannelContext<Signer>,
-}
+impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {
+ /// Creates a new channel from a remote sides' request for one.
+ /// Assumes chain_hash has already been checked and corresponds with what we expect!
+ pub fn new_from_req<ES: Deref, SP: Deref, F: Deref, L: Deref>(
+ fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
+ counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
+ their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
+ current_chain_height: u32, logger: &L, outbound_scid_alias: u64
+ ) -> Result<Channel<Signer>, ChannelError>
+ where ES::Target: EntropySource,
+ SP::Target: SignerProvider<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+ {
+ let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
+
+ // First check the channel type is known, failing before we do anything else if we don't
+ // support this channel type.
+ let channel_type = if let Some(channel_type) = &msg.channel_type {
+ if channel_type.supports_any_optional_bits() {
+ return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
+ }
+
+ // We only support the channel types defined by the `ChannelManager` in
+ // `provided_channel_type_features`. The channel type must always support
+ // `static_remote_key`.
+ if !channel_type.requires_static_remote_key() {
+ return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
+ }
+ // Make sure we support all of the features behind the channel type.
+ if !channel_type.is_subset(our_supported_features) {
+ return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
+ }
+ if channel_type.requires_scid_privacy() && announced_channel {
+ return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
+ }
+ channel_type.clone()
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ channel_type
+ };
+ let opt_anchors = channel_type.supports_anchors_zero_fee_htlc_tx();
+
+ let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
+ let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
+ let pubkeys = holder_signer.pubkeys().clone();
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: msg.funding_pubkey,
+ revocation_basepoint: msg.revocation_basepoint,
+ payment_point: msg.payment_point,
+ delayed_payment_basepoint: msg.delayed_payment_basepoint,
+ htlc_basepoint: msg.htlc_basepoint
+ };
-impl<Signer: WriteableEcdsaChannelSigner> OutboundV1Channel<Signer> {
- fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
- // The default channel type (ie the first one we try) depends on whether the channel is
- // public - if it is, we just go with `only_static_remotekey` as it's the only option
- // available. If it's private, we first try `scid_privacy` as it provides better privacy
- // with no other changes, and fall back to `only_static_remotekey`.
- let mut ret = ChannelTypeFeatures::only_static_remote_key();
- if !config.channel_handshake_config.announced_channel &&
- config.channel_handshake_config.negotiate_scid_privacy &&
- their_features.supports_scid_privacy() {
- ret.set_scid_privacy_required();
+ if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
+ return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
}
- // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
- // set it now. If they don't understand it, we'll fall back to our default of
- // `only_static_remotekey`.
- #[cfg(anchors)]
- { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
- if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
- their_features.supports_anchors_zero_fee_htlc_tx() {
- ret.set_anchors_zero_fee_htlc_tx_required();
- }
+ // Check sanity of message fields:
+ if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
+ return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
}
+ if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+ return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
+ }
+ if msg.channel_reserve_satoshis > msg.funding_satoshis {
+ return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
+ }
+ let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
+ if msg.push_msat > full_channel_value_msat {
+ return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
+ }
+ if msg.dust_limit_satoshis > msg.funding_satoshis {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
+ }
+ if msg.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
+ }
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw, None, logger)?;
- ret
- }
-
- pub fn new_outbound<ES: Deref, SP: Deref, F: Deref>(
- fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
- channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
- outbound_scid_alias: u64
- ) -> Result<Channel<Signer>, APIError>
- where ES::Target: EntropySource,
- SP::Target: SignerProvider<Signer = Signer>,
- F::Target: FeeEstimator,
- {
- let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
- let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
- let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
- let pubkeys = holder_signer.pubkeys().clone();
+ let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ if msg.to_self_delay > max_counterparty_selected_contest_delay {
+ return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
+ }
+ if msg.max_accepted_htlcs < 1 {
+ return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ }
+ if msg.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
+ }
- if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
- return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
+ // Now check against optional parameters as set by config...
+ if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
+ return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
}
- if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
- return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
+ if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
}
- let channel_value_msat = channel_value_satoshis * 1000;
- if push_msat > channel_value_msat {
- return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
+ if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
}
- if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
- return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
+ if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
}
- let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
+ if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
+ }
+ if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+
+ // Convert things into internal flags and prep our state:
+
+ if config.channel_handshake_limits.force_announced_channel_preference {
+ if config.channel_handshake_config.announced_channel != announced_channel {
+ return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
+ }
+ }
+
+ let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
// Protocol level safety check in place, although it should never happen because
// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
- return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
+ }
+ if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
+ msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
+ }
+ if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
+ return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
}
- let channel_type = Self::get_initial_channel_type(&config, their_features);
- debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
-
- let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
+ // check if the funder's amount for the initial commitment tx is sufficient
+ // for full fee payment plus a few HTLCs to ensure the channel will be useful.
+ let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
+ let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors) / 1000;
+ if funders_amount_msat / 1000 < commitment_tx_fee {
+ return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", funders_amount_msat / 1000, commitment_tx_fee)));
+ }
- let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
- let commitment_tx_fee = commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, channel_type.requires_anchors_zero_fee_htlc_tx());
- if value_to_self_msat < commitment_tx_fee {
- return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
+ let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee;
+ // While it's reasonable for us to not meet the channel reserve initially (if they don't
+ // want to push much to us), our counterparty should always have more than our reserve.
+ if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
}
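		// A minimal standalone sketch of the two funder-side checks above, with the
		// fee and reserve passed in as plain numbers (the real values come from
		// `commit_tx_fee_msat` and `get_holder_selected_channel_reserve_satoshis`):
		// the funder must be able to pay the initial commitment fee and, after
		// paying it, still hold at least the reserve we selected for them. Assumes
		// push_msat <= funding_satoshis * 1000, as checked earlier.
		fn sketch_funder_can_afford(
			funding_satoshis: u64, push_msat: u64, commitment_tx_fee_sat: u64,
			holder_selected_reserve_sat: u64,
		) -> bool {
			let funders_amount_msat = funding_satoshis * 1000 - push_msat;
			let funders_amount_sat = funders_amount_msat / 1000;
			funders_amount_sat >= commitment_tx_fee_sat
				&& funders_amount_sat - commitment_tx_fee_sat >= holder_selected_reserve_sat
		}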
- let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+ let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+ match &msg.shutdown_scriptpubkey {
+ &Some(ref script) => {
+					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+ if script.len() == 0 {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
+ }
+ Some(script.clone())
+ }
+ },
+				// Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, so we fail the channel
+ &None => {
+ return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
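		// A minimal standalone sketch of the upfront_shutdown_script opt-out rule
		// above: when the peer signals the feature, an empty script is an explicit
		// opt-out, a non-empty script is kept (the real code additionally checks
		// BOLT 2 compliance), and a missing field is treated as a protocol violation.
		fn sketch_parse_upfront_shutdown(field: Option<&[u8]>) -> Result<Option<Vec<u8>>, &'static str> {
			match field {
				Some(script) if script.is_empty() => Ok(None), // explicit opt-out via 0-length script
				Some(script) => Ok(Some(script.to_vec())),     // keep as the upfront shutdown script
				None => Err("upfront_shutdown signaled but no script provided"),
			}
		}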
let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
match signer_provider.get_shutdown_scriptpubkey() {
Ok(scriptpubkey) => Some(scriptpubkey),
- Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+ Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
}
} else { None };
if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
if !shutdown_scriptpubkey.is_compatible(&their_features) {
- return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
+ return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
}
}
let destination_script = match signer_provider.get_destination_script() {
Ok(script) => script,
- Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+ Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
};
- let temporary_channel_id = entropy_source.get_secure_random_bytes();
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
- Ok(Channel {
+ let chan = Channel {
context: ChannelContext {
user_id,
config: LegacyChannelConfig {
options: config.channel_config.clone(),
- announced_channel: config.channel_handshake_config.announced_channel,
+ announced_channel,
commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
},
prev_config: None,
- inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
+ inbound_handshake_limits_override: None,
- channel_id: temporary_channel_id,
- temporary_channel_id: Some(temporary_channel_id),
- channel_state: ChannelState::OurInitSent as u32,
+ temporary_channel_id: Some(msg.temporary_channel_id),
+ channel_id: msg.temporary_channel_id,
+ channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
- channel_value_satoshis,
latest_monitor_update_id: 0,
cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
- value_to_self_msat,
+ value_to_self_msat: msg.push_msat,
pending_inbound_htlcs: Vec::new(),
pending_outbound_htlcs: Vec::new(),
monitor_pending_finalized_fulfills: Vec::new(),
#[cfg(debug_assertions)]
- holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+ holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
#[cfg(debug_assertions)]
- counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
+ counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
last_sent_closing_fee: None,
pending_counterparty_closing_signed: None,
closing_fee_limits: None,
target_closing_feerate_sats_per_kw: None,
- inbound_awaiting_accept: false,
+ inbound_awaiting_accept: true,
funding_tx_confirmed_in: None,
funding_tx_confirmation_height: 0,
short_channel_id: None,
channel_creation_height: current_chain_height,
- feerate_per_kw: feerate,
- counterparty_dust_limit_satoshis: 0,
+ feerate_per_kw: msg.feerate_per_kw,
+ channel_value_satoshis: msg.funding_satoshis,
+ counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
- counterparty_max_htlc_value_in_flight_msat: 0,
- holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
- counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
+ counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
+ holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
+ counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
holder_selected_channel_reserve_satoshis,
- counterparty_htlc_minimum_msat: 0,
+ counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
- counterparty_max_accepted_htlcs: 0,
+ counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
- minimum_depth: None, // Filled in in accept_channel
+ minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
counterparty_forwarding_info: None,
channel_transaction_parameters: ChannelTransactionParameters {
holder_pubkeys: pubkeys,
holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
- is_outbound_from_holder: true,
- counterparty_parameters: None,
+ is_outbound_from_holder: false,
+ counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ }),
funding_outpoint: None,
- opt_anchors: if channel_type.requires_anchors_zero_fee_htlc_tx() { Some(()) } else { None },
+ opt_anchors: if opt_anchors { Some(()) } else { None },
opt_non_zero_fee_anchors: None
},
funding_transaction: None,
- counterparty_cur_commitment_point: None,
+ counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
counterparty_prev_commitment_point: None,
counterparty_node_id,
- counterparty_shutdown_scriptpubkey: None,
+ counterparty_shutdown_scriptpubkey,
commitment_secrets: CounterpartyCommitmentSecrets::new(),
pending_monitor_updates: Vec::new(),
}
- })
- }
-}
+ };
-/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
-pub(super) struct InboundV1Channel<Signer: ChannelSigner> {
- pub context: ChannelContext<Signer>,
+ Ok(chan)
+ }
}
-impl<Signer: WriteableEcdsaChannelSigner> InboundV1Channel<Signer> {}
-
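For orientation only: a minimal sketch of how the relocated constructor is meant to be driven when a peer's `open_channel` arrives, mirroring the test call sites below. The `fee_estimator`, `keys_provider`, `config`, `logger`, `their_node_id` and `open_channel_msg` bindings are assumed to be in scope, and the numeric arguments (user id 7, chain height 0, outbound SCID alias 42) are placeholders copied from the tests, not recommendations.

	// Sketch only -- not part of the diff.
	let mut inbound_chan = InboundV1Channel::<EnforcingSigner>::new_from_req(
		&fee_estimator, &&keys_provider, &&keys_provider, their_node_id,
		&channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, 42,
	).unwrap();
	// Build the accept_channel reply (argument as used in the tests below).
	let accept_channel_msg = inbound_chan.accept_inbound_channel(0);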
const SERIALIZATION_VERSION: u8 = 3;
const MIN_SERIALIZATION_VERSION: u8 = 2;
use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
#[cfg(anchors)]
use crate::ln::channel::InitFeatures;
- use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
+ use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
use crate::ln::features::ChannelTypeFeatures;
use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
// Make sure A's dust limit is as we expect.
let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+ let mut node_b_chan = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
let mut accept_channel_msg = node_b_chan.accept_inbound_channel(0);
// Create Node B's channel by receiving Node A's open_channel message
let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
+ let mut node_b_chan = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42).unwrap();
// Node B --> Node A: accept channel
let accept_channel_msg = node_b_chan.accept_inbound_channel(0);
// Test that `new_from_req` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
- let chan_3 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
+ let chan_3 = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, 42).unwrap();
let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
// Test with the upper bound - 1 of valid values (99%).
- let chan_4 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
+ let chan_4 = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, 42).unwrap();
let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
// Test that `new_from_req` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
- let chan_7 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
+ let chan_7 = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, 42).unwrap();
let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
// Test that `new_from_req` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
- let chan_8 = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
+ let chan_8 = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, 42).unwrap();
let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}
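To make the arithmetic behind these assertions explicit, here is a minimal sketch of the clamping the tests check: the configured percentage is clamped to the [1, 100] range and then applied to the channel value expressed in msat. The function name is illustrative and the real helper may be written differently.

	// Sketch only -- restates the behaviour asserted above.
	fn max_in_flight_msat_sketch(channel_value_satoshis: u64, configured_percent: u8) -> u64 {
		let percent = configured_percent.clamp(1, 100) as u64;
		// channel_value_satoshis * 1000 (msat per sat) * percent / 100
		channel_value_satoshis * 10 * percent
	}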
inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
- let chan_inbound_node = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
+ let chan_inbound_node = InboundV1Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
} else {
// Channel Negotiations failed
- let result = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
+ let result = InboundV1Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
assert!(result.is_err());
}
}
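For reference, a sketch of the reserve selection this test exercises, consistent with the expected values computed above: each side's selected reserve is the configured proportional share of the channel value, floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS, and `new_from_req` rejects the open if the combined reserves leave no spendable balance. The function name is illustrative only.

	// Sketch only -- mirrors the expected value computed in this test.
	fn selected_reserve_sketch(channel_value_satoshis: u64, proportional_millionths: u32) -> u64 {
		let proportional =
			(channel_value_satoshis as u128 * proportional_millionths as u128 / 1_000_000) as u64;
		cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, proportional)
	}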
let mut open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
open_channel_msg.channel_type = Some(channel_type_features);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let res = Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider,
+ let res = InboundV1Channel::<EnforcingSigner>::new_from_req(&feeest, &&keys_provider, &&keys_provider,
node_b_node_id, &channelmanager::provided_channel_type_features(&config),
&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, 42);
assert!(res.is_ok());
).unwrap();
let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
- let channel_b = Channel::<EnforcingSigner>::new_from_req(
+ let channel_b = InboundV1Channel::<EnforcingSigner>::new_from_req(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
&open_channel_msg, 7, &config, 0, &&logger, 42
// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
// `static_remote_key`, it will fail the channel.
- let channel_b = Channel::<EnforcingSigner>::new_from_req(
+ let channel_b = InboundV1Channel::<EnforcingSigner>::new_from_req(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
&open_channel_msg, 7, &config, 0, &&logger, 42
let mut open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
- let res = Channel::<EnforcingSigner>::new_from_req(
+ let res = InboundV1Channel::<EnforcingSigner>::new_from_req(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
&open_channel_msg, 7, &config, 0, &&logger, 42
let open_channel_msg = channel_a.get_open_channel(genesis_block(network).header.block_hash());
- let channel_b = Channel::<EnforcingSigner>::new_from_req(
+ let channel_b = InboundV1Channel::<EnforcingSigner>::new_from_req(
&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
&open_channel_msg, 7, &config, 0, &&logger, 42