X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=4de551b72bfdf331d4ee64b15bbe8b69ee95d8bf;hb=refs%2Fheads%2Fupstream%2Fmain;hp=57fbe51626bec580a111f62335dfdc63d70771d5;hpb=08c566ccfb34454383f48880f22050cc9e9d1bb6;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 57fbe516..bb7d38a5 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -130,7 +130,7 @@ impl_writeable_tlv_based_enum!(InboundHTLCResolution,
 	},
 	(2, Pending) => {
 		(0, update_add_htlc, required),
-	};
+	},
 );
 
 enum InboundHTLCState {
@@ -906,8 +910,10 @@ pub(super) struct MonitorRestoreUpdates {
 #[allow(unused)]
 pub(super) struct SignerResumeUpdates {
 	pub commitment_update: Option<msgs::CommitmentUpdate>,
+	pub revoke_and_ack: Option<msgs::RevokeAndACK>,
 	pub funding_signed: Option<msgs::FundingSigned>,
 	pub channel_ready: Option<msgs::ChannelReady>,
+	pub order: RAACommitmentOrder,
 }
 
 /// The return value of `channel_reestablish`
@@ -960,8 +962,12 @@ impl HolderCommitmentPoint {
 	{
 		HolderCommitmentPoint::Available {
 			transaction_number: INITIAL_COMMITMENT_NUMBER,
-			current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx),
-			next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx),
+			// TODO(async_signing): remove this expect with the Uninitialized variant
+			current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx)
+				.expect("Signer must be able to provide initial commitment point"),
+			// TODO(async_signing): remove this expect with the Uninitialized variant
+			next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx)
+				.expect("Signer must be able to provide second commitment point"),
 		}
 	}
 
@@ -990,7 +996,42 @@ impl HolderCommitmentPoint {
 		}
 	}
 
-	pub fn advance<SP: Deref, L: Deref>(&mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L)
+	/// If we are pending the next commitment point, this method tries asking the signer again,
+	/// and transitions to the next state if successful.
+	///
+	/// This method is used for the following transitions:
+	/// - `PendingNext` -> `Available`
+	pub fn try_resolve_pending<SP: Deref, L: Deref>(&mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L)
+		where SP::Target: SignerProvider, L::Target: Logger
+	{
+		if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self {
+			if let Ok(next) = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx) {
+				log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1);
+				*self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next };
+			} else {
+				log_trace!(logger, "Next per-commitment point {} is pending", transaction_number);
+			}
+		}
+	}
+
+	/// If we are not pending the next commitment point, this method advances the commitment number
+	/// and requests the next commitment point from the signer. Returns `Ok` if we were at
+	/// `Available` and were able to advance our commitment number (even if we are still pending
+	/// the next commitment point).
+	///
+	/// If our signer is not ready to provide the next commitment point, we will
+	/// only advance to `PendingNext`, and should be tried again later in `signer_unblocked`
+	/// via `try_resolve_pending`.
+	///
+	/// If our signer is ready to provide the next commitment point, we will advance all the
+	/// way to `Available`.
+	///
+	/// This method is used for the following transitions:
+	/// - `Available` -> `PendingNext`
+	/// - `Available` -> `PendingNext` -> `Available` (in one fell swoop)
+	pub fn advance<SP: Deref, L: Deref>(
+		&mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L
+	) -> Result<(), ()>
 		where SP::Target: SignerProvider, L::Target: Logger
 	{
 		if let HolderCommitmentPoint::Available { transaction_number, next, .. } = self {
@@ -998,13 +1039,10 @@ impl HolderCommitmentPoint {
 				transaction_number: *transaction_number - 1,
 				current: *next,
 			};
+			self.try_resolve_pending(signer, secp_ctx, logger);
+			return Ok(());
 		}
-
-		if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self {
-			let next = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx);
-			log_trace!(logger, "Retrieved next per-commitment point {}", *transaction_number - 1);
-			*self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next };
-		}
+		Err(())
 	}
 }
 
@@ -1215,6 +1253,14 @@ pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
 	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
 	monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
 
+	/// If we went to send a revoke_and_ack but our signer was unable to give us a signature,
+	/// we should retry at some point in the future when the signer indicates it may have a
+	/// signature for us.
+	///
+	/// This may also be used to make sure we send a `revoke_and_ack` after a `commitment_signed`
+	/// if we need to maintain ordering of messages, but are pending the signer on a previous
+	/// message.
+	signer_pending_revoke_and_ack: bool,
 	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
 	/// but our signer (initially) refused to give us a signature, we should retry at some point in
 	/// the future when the signer indicates it may have a signature for us.
@@ -1683,6 +1729,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			monitor_pending_finalized_fulfills: Vec::new(),
 			monitor_pending_update_adds: Vec::new(),
 
+			signer_pending_revoke_and_ack: false,
 			signer_pending_commitment_update: false,
 			signer_pending_funding: false,
 
@@ -1774,7 +1821,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		Ok(channel_context)
 	}
 
-	fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
+	fn new_for_outbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
 		fee_estimator: &'a LowerBoundedFeeEstimator<F>,
 		entropy_source: &'a ES,
 		signer_provider: &'a SP,
@@ -1791,11 +1838,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		channel_keys_id: [u8; 32],
 		holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
 		pubkeys: ChannelPublicKeys,
+		_logger: L,
 	) -> Result<ChannelContext<SP>, APIError>
 		where
 			ES::Target: EntropySource,
 			F::Target: FeeEstimator,
 			SP::Target: SignerProvider,
+			L::Target: Logger,
 	{
 		// This will be updated with the counterparty contribution if this is a dual-funded channel
 		let channel_value_satoshis = funding_satoshis;
@@ -1908,6 +1957,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			monitor_pending_finalized_fulfills: Vec::new(),
 			monitor_pending_update_adds: Vec::new(),
 
+			signer_pending_revoke_and_ack: false,
 			signer_pending_commitment_update: false,
 			signer_pending_funding: false,
 
@@ -2118,8 +2168,8 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 	/// Returns the holder signer for this channel.
 	#[cfg(test)]
-	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
-		return &self.holder_signer
+	pub fn get_mut_signer(&mut self) -> &mut ChannelSignerType<SP> {
+		return &mut self.holder_signer
 	}
 
 	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
@@ -2146,6 +2196,143 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 		}
 	}
 
+	/// Performs checks against necessary constraints after receiving either an `accept_channel` or
+	/// `accept_channel2` message.
+	pub fn do_accept_channel_checks(
+		&mut self, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures,
+		common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64,
+	) -> Result<(), ChannelError> {
+		let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits };
+
+		// Check sanity of message fields:
+		if !self.is_outbound() {
+			return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
+		}
+		if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
+			return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
+		}
+		if common_fields.dust_limit_satoshis > 21000000 * 100000000 {
+			return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis)));
+		}
+		if channel_reserve_satoshis > self.channel_value_satoshis {
+			return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, self.channel_value_satoshis)));
+		}
+		if common_fields.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis {
+			return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis)));
+		}
+		if channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis {
+			return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+				channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis)));
+		}
+		let full_channel_value_msat = (self.channel_value_satoshis - channel_reserve_satoshis) * 1000;
+		if common_fields.htlc_minimum_msat >= full_channel_value_msat {
+			return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat)));
+		}
+		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+		if common_fields.to_self_delay > max_delay_acceptable {
+			return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay)));
+		}
+		if common_fields.max_accepted_htlcs < 1 {
+			return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+		}
+		if common_fields.max_accepted_htlcs > MAX_HTLCS {
+			return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", common_fields.max_accepted_htlcs, MAX_HTLCS)));
+		}
+
+		// Now check against optional parameters as set by config...
+		if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+			return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+		}
+		if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+			return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+		}
+		if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+			return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+		}
+		if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+			return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+		}
+		if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+			return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+		}
+		if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+			return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+		}
+		if common_fields.minimum_depth > peer_limits.max_minimum_depth {
+			return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth)));
+		}
+
+		if let Some(ty) = &common_fields.channel_type {
+			if *ty != self.channel_type {
+				return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+			}
+		} else if their_features.supports_channel_type() {
+			// Assume they've accepted the channel type as they said they understand it.
+		} else {
+			let channel_type = ChannelTypeFeatures::from_init(&their_features);
+			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+				return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+			}
+			self.channel_type = channel_type.clone();
+			self.channel_transaction_parameters.channel_type_features = channel_type;
+		}
+
+		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+			match &common_fields.shutdown_scriptpubkey {
+				&Some(ref script) => {
+					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
+					if script.len() == 0 {
+						None
+					} else {
+						if !script::is_bolt2_compliant(&script, their_features) {
+							return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+						}
+						Some(script.clone())
+					}
+				},
+				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, so we fail the channel
+				&None => {
+					return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+				}
+			}
+		} else { None };
+
+		self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis;
+		self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000);
+		self.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis);
+		self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat;
+		self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs;
+
+		if peer_limits.trust_own_funding_0conf {
+			self.minimum_depth = Some(common_fields.minimum_depth);
+		} else {
+			self.minimum_depth = Some(cmp::max(1, common_fields.minimum_depth));
+		}
+
+		let counterparty_pubkeys = ChannelPublicKeys {
+			funding_pubkey: common_fields.funding_pubkey,
+			revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint),
+			payment_point: common_fields.payment_basepoint,
+			delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint),
+			htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint)
+		};
+
+		self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+			selected_contest_delay: common_fields.to_self_delay,
+			pubkeys: counterparty_pubkeys,
+		});
+
+		self.counterparty_cur_commitment_point = Some(common_fields.first_per_commitment_point);
+		self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+		self.channel_state = ChannelState::NegotiatingFunding(
+			NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+		);
+		self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+		Ok(())
+	}
+
 	/// Returns the block hash in which our funding transaction was confirmed.
 	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
 		self.funding_tx_confirmed_in
@@ -2631,7 +2818,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
 			feerate_per_kw = cmp::max(feerate_per_kw, feerate);
 		}
 		let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
-		cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
+		cmp::max(feerate_per_kw.saturating_add(2530), feerate_plus_quarter.unwrap_or(u32::MAX))
 	}
 
 	/// Get forwarding information for the counterparty.
@@ -4446,7 +4633,18 @@ impl<SP: Deref> Channel<SP> where
 				channel_id: Some(self.context.channel_id()),
 			};
 
-		self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
+		if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() {
+			// We only fail to advance our commitment point/number if we're currently
+			// waiting for our signer to unblock and provide a commitment point.
+			// During post-funding channel operation, we only advance our point upon
+			// receiving a commitment_signed, and our counterparty cannot send us
+			// another commitment_signed until we've provided a new commitment point
+			// in revoke_and_ack, which requires unblocking our signer and completing
+			// the advance to the next point. This should be unreachable since
+			// a new commitment_signed should fail at our signature checks above.
+			debug_assert!(false, "We should be ready to advance our commitment point by the time we receive commitment_signed");
+			return Err(ChannelError::close("Failed to advance our commitment point".to_owned()));
+		}
 		self.context.expecting_peer_commitment_signed = false;
 		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
 		// build_commitment_no_status_check() next which will reset this to RAAFirst.
@@ -5164,12 +5362,23 @@ impl<SP: Deref> Channel<SP> where
 			};
 		}
 
-		let raa = if self.context.monitor_pending_revoke_and_ack {
-			Some(self.get_last_revoke_and_ack())
+		let mut raa = if self.context.monitor_pending_revoke_and_ack {
+			self.get_last_revoke_and_ack(logger)
 		} else { None };
-		let commitment_update = if self.context.monitor_pending_commitment_signed {
+		let mut commitment_update = if self.context.monitor_pending_commitment_signed {
 			self.get_last_commitment_update_for_send(logger).ok()
 		} else { None };
+		if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
+			&& self.context.signer_pending_commitment_update && raa.is_some() {
+			self.context.signer_pending_revoke_and_ack = true;
+			raa = None;
+		}
+		if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst
+			&& self.context.signer_pending_revoke_and_ack && commitment_update.is_some() {
+			self.context.signer_pending_commitment_update = true;
+			commitment_update = None;
+		}
+
 		if commitment_update.is_some() {
 			self.mark_awaiting_response();
 		}
@@ -5239,9 +5448,10 @@ impl<SP: Deref> Channel<SP> where
 	/// blocked.
 	#[cfg(async_signing)]
 	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
-		let commitment_update = if self.context.signer_pending_commitment_update {
-			self.get_last_commitment_update_for_send(logger).ok()
-		} else { None };
+		if !self.context.holder_commitment_point.is_available() {
+			log_trace!(logger, "Attempting to update holder per-commitment point...");
+			self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger);
+		}
 		let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
 			self.context.get_funding_signed_msg(logger).1
 		} else { None };
@@ -5249,30 +5459,73 @@ impl<SP: Deref> Channel<SP> where
 			self.check_get_channel_ready(0, logger)
 		} else { None };
 
-		log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
+		let mut commitment_update = if self.context.signer_pending_commitment_update {
+			log_trace!(logger, "Attempting to generate pending commitment update...");
+			self.get_last_commitment_update_for_send(logger).ok()
+		} else { None };
+		let mut revoke_and_ack = if self.context.signer_pending_revoke_and_ack {
+			log_trace!(logger, "Attempting to generate pending revoke and ack...");
+			self.get_last_revoke_and_ack(logger)
+		} else { None };
+
+		if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
+			&& self.context.signer_pending_commitment_update && revoke_and_ack.is_some() {
+			log_trace!(logger, "Signer unblocked for revoke and ack, but unable to send due to resend order, waiting on signer for commitment update");
+			self.context.signer_pending_revoke_and_ack = true;
+			revoke_and_ack = None;
+		}
+		if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst
+			&& self.context.signer_pending_revoke_and_ack && commitment_update.is_some() {
+			log_trace!(logger, "Signer unblocked for commitment update, but unable to send due to resend order, waiting on signer for revoke and ack");
+			self.context.signer_pending_commitment_update = true;
+			commitment_update = None;
+		}
+
+		log_trace!(logger, "Signer unblocked with {} commitment_update, {} revoke_and_ack, {} funding_signed and {} channel_ready, with resend order {:?}",
 			if commitment_update.is_some() { "a" } else { "no" },
+			if revoke_and_ack.is_some() { "a" } else { "no" },
 			if funding_signed.is_some() { "a" } else { "no" },
-			if channel_ready.is_some() { "a" } else { "no" });
+			if channel_ready.is_some() { "a" } else { "no" },
+			self.context.resend_order);
 
 		SignerResumeUpdates {
 			commitment_update,
+			revoke_and_ack,
 			funding_signed,
 			channel_ready,
+			order: self.context.resend_order.clone(),
 		}
 	}
 
-	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
-		debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER + 2);
-		// TODO: handle non-available case when get_per_commitment_point becomes async
-		debug_assert!(self.context.holder_commitment_point.is_available());
-		let next_per_commitment_point = self.context.holder_commitment_point.current_point();
+	fn get_last_revoke_and_ack<L: Deref>(&mut self, logger: &L) -> Option<msgs::RevokeAndACK> where L::Target: Logger {
+		debug_assert!(self.context.holder_commitment_point.transaction_number() <= INITIAL_COMMITMENT_NUMBER - 2);
+		self.context.holder_commitment_point.try_resolve_pending(&self.context.holder_signer, &self.context.secp_ctx, logger);
 		let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.holder_commitment_point.transaction_number() + 2);
-		msgs::RevokeAndACK {
-			channel_id: self.context.channel_id,
-			per_commitment_secret,
-			next_per_commitment_point,
-			#[cfg(taproot)]
-			next_local_nonce: None,
+		if let HolderCommitmentPoint::Available { transaction_number: _, current, next: _ } = self.context.holder_commitment_point {
+			self.context.signer_pending_revoke_and_ack = false;
+			Some(msgs::RevokeAndACK {
+				channel_id: self.context.channel_id,
+				per_commitment_secret,
+				next_per_commitment_point: current,
+				#[cfg(taproot)]
+				next_local_nonce: None,
+			})
+		} else {
+			#[cfg(not(async_signing))] {
+				panic!("Holder commitment point must be Available when generating revoke_and_ack");
+			}
+			#[cfg(async_signing)] {
+				// Technically if we're at HolderCommitmentPoint::PendingNext,
+				// we have a commitment point ready to send in an RAA, however we
+				// choose to wait since if we send RAA now, we could get another
+				// CS before we have any commitment point available. Blocking our
+				// RAA here is a convenient way to make sure that post-funding
+				// we're only ever waiting on one commitment point at a time.
+				log_trace!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
+					&self.context.channel_id(), self.context.holder_commitment_point.transaction_number());
+				self.context.signer_pending_revoke_and_ack = true;
+				None
+			}
 		}
 	}
 
@@ -5401,7 +5654,9 @@ impl<SP: Deref> Channel<SP> where
 		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.holder_commitment_point.transaction_number() - 1;
 		if msg.next_remote_commitment_number > 0 {
-			let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
+			let expected_point = self.context.holder_signer.as_ref()
+				.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx)
+				.expect("TODO: async signing is not yet supported for per commitment points upon channel reestablishment");
 			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
 				.map_err(|_| ChannelError::close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
 			if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
@@ -5478,7 +5733,7 @@ impl<SP: Deref> Channel<SP> where
 					self.context.monitor_pending_revoke_and_ack = true;
 					None
 				} else {
-					Some(self.get_last_revoke_and_ack())
+					self.get_last_revoke_and_ack(logger)
 				}
 			} else {
 				debug_assert!(false, "All values should have been handled in the four cases above");
@@ -5505,7 +5760,7 @@ impl<SP: Deref> Channel<SP> where
 		} else { None };
 
 		if msg.next_local_commitment_number == next_counterparty_commitment_number {
-			if required_revoke.is_some() {
+			if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack {
 				log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
 			} else {
 				log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
@@ -5518,7 +5773,7 @@ impl<SP: Deref> Channel<SP> where
 				order: self.context.resend_order.clone(),
 			})
 		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
-			if required_revoke.is_some() {
+			if required_revoke.is_some() || self.context.signer_pending_revoke_and_ack {
 				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
 			} else {
 				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
@@ -5532,10 +5787,25 @@ impl<SP: Deref> Channel<SP> where
 				order: self.context.resend_order.clone(),
 			})
 		} else {
+			let commitment_update = if self.context.resend_order == RAACommitmentOrder::RevokeAndACKFirst
+				&& self.context.signer_pending_revoke_and_ack {
+				log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for revoke and ack", &self.context.channel_id());
+				self.context.signer_pending_commitment_update = true;
+				None
+			} else {
+				self.get_last_commitment_update_for_send(logger).ok()
+			};
+			let raa = if self.context.resend_order == RAACommitmentOrder::CommitmentFirst
+				&& self.context.signer_pending_commitment_update && required_revoke.is_some() {
+				log_trace!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx, but unable to send due to resend order, waiting on signer for commitment update", &self.context.channel_id());
+				self.context.signer_pending_revoke_and_ack = true;
+				None
+			} else {
+				required_revoke
+			};
 			Ok(ReestablishResponses {
 				channel_ready, shutdown_msg, announcement_sigs,
-				raa: required_revoke,
-				commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
+				raa, commitment_update,
 				order: self.context.resend_order.clone(),
 			})
 		}
@@ -7135,6 +7405,7 @@ impl<SP: Deref> Channel<SP> where
 				channel_id: self.context.channel_id,
 				signature,
 				htlc_signatures,
+				batch: None,
 				#[cfg(taproot)]
 				partial_signature_with_nonce: None,
 			}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
@@ -7297,13 +7568,14 @@ pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider
 }
 
 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
-	pub fn new<ES: Deref, F: Deref>(
+	pub fn new<ES: Deref, F: Deref, L: Deref>(
 		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
 		channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
-		outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
+		outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>, logger: L
 	) -> Result<OutboundV1Channel<SP>, APIError>
 	where ES::Target: EntropySource,
-	      F::Target: FeeEstimator
+	      F::Target: FeeEstimator,
+	      L::Target: Logger,
 	{
 		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
 		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
@@ -7335,6 +7607,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 				channel_keys_id,
 				holder_signer,
 				pubkeys,
+				logger,
 			)?,
 			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
 		};
@@ -7497,136 +7770,11 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 	}
 
 	// Message handlers
-	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
-		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
-
-		// Check sanity of message fields:
-		if !self.context.is_outbound() {
-			return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
-		}
-		if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
-			return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
-		}
-		if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
-			return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
-		}
-		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
-			return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
-		}
-		if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
-			return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
-		}
-		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
-			return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
-				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
-		}
-		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
-		if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
-			return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
-		}
-		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
-		if msg.common_fields.to_self_delay > max_delay_acceptable {
-			return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
-		}
-		if msg.common_fields.max_accepted_htlcs < 1 {
-			return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
-		}
-		if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
-			return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
-		}
-
-		// Now check against optional parameters as set by config...
-		if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
-			return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
-		}
-		if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
-			return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
-		}
-		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
-			return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
-		}
-		if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
-			return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
-		}
-		if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
-			return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
-		}
-		if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
-			return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
-		}
-		if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
-			return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
-		}
-
-		if let Some(ty) = &msg.common_fields.channel_type {
-			if *ty != self.context.channel_type {
-				return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
-			}
-		} else if their_features.supports_channel_type() {
-			// Assume they've accepted the channel type as they said they understand it.
-		} else {
-			let channel_type = ChannelTypeFeatures::from_init(&their_features);
-			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
-				return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
-			}
-			self.context.channel_type = channel_type.clone();
-			self.context.channel_transaction_parameters.channel_type_features = channel_type;
-		}
-
-		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
-			match &msg.common_fields.shutdown_scriptpubkey {
-				&Some(ref script) => {
-					// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
-					if script.len() == 0 {
-						None
-					} else {
-						if !script::is_bolt2_compliant(&script, their_features) {
-							return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
-						}
-						Some(script.clone())
-					}
-				},
-				// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
-				&None => {
-					return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
-				}
-			}
-		} else { None };
-
-		self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
-		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
-		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
-		self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
-		self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
-
-		if peer_limits.trust_own_funding_0conf {
-			self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
-		} else {
-			self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
-		}
-
-		let counterparty_pubkeys = ChannelPublicKeys {
-			funding_pubkey: msg.common_fields.funding_pubkey,
-			revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
-			payment_point: msg.common_fields.payment_basepoint,
-			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
-			htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
-		};
-
-		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
-			selected_contest_delay: msg.common_fields.to_self_delay,
-			pubkeys: counterparty_pubkeys,
-		});
-
-		self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
-		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
-
-		self.context.channel_state = ChannelState::NegotiatingFunding(
-			NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
-		);
-		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
-
-		Ok(())
+	pub fn accept_channel(
+		&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits,
+		their_features: &InitFeatures
+	) -> Result<(), ChannelError> {
+		self.context.do_accept_channel_checks(default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis)
 	}
 
 	/// Handles a funding_signed message from the remote end.
@@ -7713,7 +7861,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 		} else {
 			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 		}
-		self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
+		if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() {
+			// We only fail to advance our commitment point/number if we're currently
+			// waiting for our signer to unblock and provide a commitment point.
+			// We cannot send open_channel before this has occurred, so if we
+			// err here by the time we receive funding_signed, something has gone wrong.
+			debug_assert!(false, "We should be ready to advance our commitment point by the time we receive funding_signed");
+			return Err((self, ChannelError::close("Failed to advance holder commitment point".to_owned())));
+		}
 		self.context.cur_counterparty_commitment_transaction_number -= 1;
 
 		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
@@ -7980,7 +8135,14 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
 		self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
 		self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
 		self.context.cur_counterparty_commitment_transaction_number -= 1;
-		self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger);
+		if self.context.holder_commitment_point.advance(&self.context.holder_signer, &self.context.secp_ctx, logger).is_err() {
+			// We only fail to advance our commitment point/number if we're currently
+			// waiting for our signer to unblock and provide a commitment point.
+			// We cannot send accept_channel before this has occurred, so if we
+			// err here by the time we receive funding_created, something has gone wrong.
+			debug_assert!(false, "We should be ready to advance our commitment point by the time we receive funding_created");
+			return Err((self, ChannelError::close("Failed to advance holder commitment point".to_owned())));
+		}
 
 		let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
 
@@ -8032,14 +8194,15 @@ pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider
 
 #[cfg(any(dual_funding, splicing))]
 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
-	pub fn new<ES: Deref, F: Deref>(
+	pub fn new<ES: Deref, F: Deref, L: Deref>(
 		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
 		counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
 		user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
-		funding_confirmation_target: ConfirmationTarget,
+		funding_confirmation_target: ConfirmationTarget, logger: L,
 	) -> Result<OutboundV2Channel<SP>, APIError>
 	where ES::Target: EntropySource,
 		F::Target: FeeEstimator,
+		L::Target: Logger,
 	{
 		let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
 		let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
@@ -8071,6 +8234,7 @@ impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
 				channel_keys_id,
 				holder_signer,
 				pubkeys,
+				logger,
 			)?,
 			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
 			dual_funding_context: DualFundingChannelContext {
@@ -8107,10 +8271,12 @@ impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
 		let first_per_commitment_point = self.context.holder_signer.as_ref()
 			.get_per_commitment_point(self.context.holder_commitment_point.transaction_number(),
-				&self.context.secp_ctx);
+				&self.context.secp_ctx)
+			.expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment");
 		let second_per_commitment_point = self.context.holder_signer.as_ref()
 			.get_per_commitment_point(self.context.holder_commitment_point.transaction_number() - 1,
-				&self.context.secp_ctx);
+				&self.context.secp_ctx)
+			.expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment");
 		let keys = self.context.get_holder_pubkeys();
 
 		msgs::OpenChannelV2 {
@@ -8257,9 +8423,11 @@ impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
 	/// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
 	fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
 		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
-			self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx);
+			self.context.holder_commitment_point.transaction_number(), &self.context.secp_ctx)
+			.expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment");
 		let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
-			self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx);
+			self.context.holder_commitment_point.transaction_number() - 1, &self.context.secp_ctx)
+			.expect("TODO: async signing is not yet supported for commitment points in v2 channel establishment");
 		let keys = self.context.get_holder_pubkeys();
 
 		msgs::AcceptChannelV2 {
@@ -8327,7 +8495,7 @@ fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures)
 const SERIALIZATION_VERSION: u8 = 4;
 const MIN_SERIALIZATION_VERSION: u8 = 3;
 
-impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
+impl_writeable_tlv_based_enum_legacy!(InboundHTLCRemovalReason,;
 	(0, FailRelay),
 	(1, FailMalformed),
 	(2, Fulfill),
@@ -9211,14 +9379,16 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			(Some(current), Some(next)) => HolderCommitmentPoint::Available {
 				transaction_number: cur_holder_commitment_transaction_number, current, next
 			},
-			(Some(current), _) => HolderCommitmentPoint::Available {
+			(Some(current), _) => HolderCommitmentPoint::PendingNext {
 				transaction_number: cur_holder_commitment_transaction_number, current,
-				next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx),
 			},
-			(_, _) => HolderCommitmentPoint::Available {
-				transaction_number: cur_holder_commitment_transaction_number,
-				current: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx),
-				next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx),
+			(_, _) => {
+				// TODO(async_signing): remove this expect with the Uninitialized variant
+				let current = holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx)
+					.expect("Must be able to derive the current commitment point upon channel restoration");
+				HolderCommitmentPoint::PendingNext {
+					transaction_number: cur_holder_commitment_transaction_number, current,
+				}
 			},
 		};
 
@@ -9266,6 +9436,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
 			monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
 
+			signer_pending_revoke_and_ack: false,
 			signer_pending_commitment_update: false,
 			signer_pending_funding: false,
 
@@ -9477,11 +9648,12 @@ mod tests {
 		keys_provider.expect(OnGetShutdownScriptpubkey {
 			returns: non_v0_segwit_shutdown_script.clone(),
 		});
+		let logger = test_utils::TestLogger::new();
 
 		let secp_ctx = Secp256k1::new();
 		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
+		match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger) {
 			Err(APIError::IncompatibleShutdownScript { script }) => {
 				assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
 			},
@@ -9501,10 +9673,11 @@ mod tests {
 		let seed = [42; 32];
 		let network = Network::Testnet;
 		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+		let logger = test_utils::TestLogger::new();
 
 		let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
 		// Now change the fee so we can check that the fee in the open_channel message is the
 		// same as the old fee.
@@ -9531,7 +9704,7 @@ mod tests {
 		// Create Node A's channel pointing to Node B's pubkey
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
 		// Create Node B's channel by receiving Node A's open_channel message
 		// Make sure A's dust limit is as we expect.
@@ -9611,10 +9784,11 @@ mod tests {
 		let seed = [42; 32];
 		let network = Network::Testnet;
 		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+		let logger = test_utils::TestLogger::new();
 
 		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
 		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
 		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
@@ -9663,7 +9837,7 @@ mod tests {
 		// Create Node A's channel pointing to Node B's pubkey
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
 		// Create Node B's channel by receiving Node A's open_channel message
 		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
@@ -9727,12 +9901,12 @@ mod tests {
 		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
 		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
 		// which is set to the lower bound + 1 (2%) of the `channel_value`.
-		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
+		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None, &logger).unwrap();
 		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
 
 		// Test with the upper bound - 1 of valid values (99%).
-		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
+		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None, &logger).unwrap();
 		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
 
@@ -9752,14 +9926,14 @@ mod tests {
 
 		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
 		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
-		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
+		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None, &logger).unwrap();
 		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
 
 		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
 		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
 		// than 100.
-		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
+		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None, &logger).unwrap();
 		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
 		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
 
@@ -9812,7 +9986,7 @@ mod tests {
 		let mut outbound_node_config = UserConfig::default();
 		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
-		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
+		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None, &logger).unwrap();
 
 		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
 		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
@@ -9849,7 +10023,7 @@ mod tests {
 		// Create Node A's channel pointing to Node B's pubkey
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
-		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
 		// Create Node B's channel by receiving Node A's open_channel message
 		// Make sure A's dust limit is as we expect.
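[Reviewer note] The test changes in the surrounding hunks are mechanical: they thread a `&logger` through `OutboundV1Channel::new`. The substantive change earlier in this diff is the two-state `HolderCommitmentPoint` machine, whose transitions are easy to lose among the hunks. As a reading aid only, here is a minimal, self-contained Rust model of those transitions — `Point` and the closure-based signer are placeholders, not LDK's real `PublicKey`/`ChannelSignerType` APIs:

// Simplified model of the HolderCommitmentPoint flow introduced by this diff.
// A signer returning Err models an async signer that is not ready yet.
type Point = u64; // stands in for secp256k1::PublicKey

#[derive(Clone, Copy)]
enum HolderCommitmentPoint {
	// Both the current point and the next one are on hand.
	Available { transaction_number: u64, current: Point, next: Point },
	// The signer has not yet produced the next point.
	PendingNext { transaction_number: u64, current: Point },
}

impl HolderCommitmentPoint {
	// PendingNext -> Available, if the signer can now produce the next point.
	fn try_resolve_pending<S: Fn(u64) -> Result<Point, ()>>(&mut self, signer: &S) {
		if let HolderCommitmentPoint::PendingNext { transaction_number, current } = *self {
			if let Ok(next) = signer(transaction_number - 1) {
				*self = HolderCommitmentPoint::Available { transaction_number, current, next };
			}
		}
	}

	// Available -> PendingNext, then immediately back to Available if the
	// signer cooperates. Err(()) means we were already parked in PendingNext.
	fn advance<S: Fn(u64) -> Result<Point, ()>>(&mut self, signer: &S) -> Result<(), ()> {
		if let HolderCommitmentPoint::Available { transaction_number, next, .. } = *self {
			*self = HolderCommitmentPoint::PendingNext {
				transaction_number: transaction_number - 1,
				current: next,
			};
			self.try_resolve_pending(signer);
			return Ok(());
		}
		Err(())
	}
}

Note that `advance` succeeds (the commitment number moves) even when the signer is not ready; the state simply parks in `PendingNext` until `signer_maybe_unblocked` calls `try_resolve_pending` again. That is why the new callers in this diff treat `Err` as "signer still pending" and debug-assert it unreachable post-funding.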
@@ -9882,7 +10056,8 @@ mod tests {
 			chain_hash,
 			short_channel_id: 0,
 			timestamp: 0,
-			flags: 0,
+			message_flags: 1, // Only must_be_one
+			channel_flags: 0,
 			cltv_expiry_delta: 100,
 			htlc_minimum_msat: 5,
 			htlc_maximum_msat: MAX_VALUE_MSAT,
@@ -9925,7 +10100,7 @@ mod tests {
 		let config = UserConfig::default();
 		let features = channelmanager::provided_init_features(&config);
 		let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
-			&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
+			&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None, &logger
 		).unwrap();
 		let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
 			&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
@@ -10079,7 +10254,7 @@ mod tests {
 		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let mut config = UserConfig::default();
 		config.channel_handshake_config.announced_channel = false;
-		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
+		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None, &*logger).unwrap(); // Nothing uses their network key in this test
 		chan.context.holder_dust_limit_satoshis = 546;
 		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
@@ -10826,7 +11001,7 @@ mod tests {
 		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
 		let config = UserConfig::default();
 		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
-			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
+			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None, &logger).unwrap();
 
 		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
 		channel_type_features.set_zero_conf_required();
@@ -10861,7 +11036,7 @@ mod tests {
 		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
 			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
-			&config, 0, 42, None
+			&config, 0, 42, None, &logger
 		).unwrap();
 		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
@@ -10872,7 +11047,7 @@ mod tests {
 		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
 			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-			None
+			None, &logger
 		).unwrap();
 
 		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
@@ -10910,7 +11085,7 @@ mod tests {
 		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
 			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-			None
+			None, &logger
 		).unwrap();
 
 		// Set `channel_type` to `None` to force the implicit feature negotiation.
@@ -10957,7 +11132,7 @@ mod tests {
 		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
 			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
-			None
+			None, &logger
 		).unwrap();
 
 		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
@@ -10976,7 +11151,7 @@ mod tests {
 		// LDK.
 		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
 			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
-			10000000, 100000, 42, &config, 0, 42, None
+			10000000, 100000, 42, &config, 0, 42, None, &logger
 		).unwrap();
 		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
@@ -11026,7 +11201,8 @@ mod tests {
 			&config,
 			0,
 			42,
-			None
+			None,
+			&logger
 		).unwrap();
 
 		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
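[Reviewer note] One other pattern recurs across `monitor_updating_restored`, `signer_maybe_unblocked`, and `channel_reestablish` in this diff: when the message that must be sent first (per `RAACommitmentOrder`) is still blocked on the signer, the other message is withheld and its `signer_pending_*` flag is set so it is retried once the signer unblocks. A simplified, illustrative sketch of that rule — `PendingFlags` and the type parameters are stand-ins, not the actual `ChannelContext` fields or message types:

#[derive(Clone, Copy, PartialEq)]
enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

struct PendingFlags {
	signer_pending_commitment_update: bool,
	signer_pending_revoke_and_ack: bool,
}

// Returns the (raa, commitment_update) pair that is actually safe to send,
// flagging whatever had to be withheld for a later retry.
fn gate_by_resend_order<R, C>(
	order: RAACommitmentOrder, flags: &mut PendingFlags,
	mut raa: Option<R>, mut commitment_update: Option<C>,
) -> (Option<R>, Option<C>) {
	// The commitment update must go first but the signer has not produced it
	// yet: hold the RAA back and remember to resend it once unblocked.
	if order == RAACommitmentOrder::CommitmentFirst
		&& flags.signer_pending_commitment_update && raa.is_some() {
		flags.signer_pending_revoke_and_ack = true;
		raa = None;
	}
	// The symmetric case for the RevokeAndACKFirst ordering.
	if order == RAACommitmentOrder::RevokeAndACKFirst
		&& flags.signer_pending_revoke_and_ack && commitment_update.is_some() {
		flags.signer_pending_commitment_update = true;
		commitment_update = None;
	}
	(raa, commitment_update)
}

In words: a restored or signer-unblocked channel never emits the second message of the pair while the first is still pending on the signer, preserving the peer-visible ordering the protocol requires.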