From: Jeffrey Czyz Date: Thu, 27 Jun 2024 19:39:26 +0000 (-0500) Subject: Merge pull request #3129 from optout21/splicing-msgs-update X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=3ccf06416091e107f443ee92027501105c48054b;hp=-c;p=rust-lightning Merge pull request #3129 from optout21/splicing-msgs-update Update splice messages according to new spec draft --- 3ccf06416091e107f443ee92027501105c48054b diff --combined lightning/src/events/mod.rs index 88785186,d717b7ff..e59a3d18 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@@ -789,8 -789,6 +789,8 @@@ pub enum Event /// If the recipient or an intermediate node misbehaves and gives us free money, this may /// overstate the amount paid, though this is unlikely. /// + /// This is only `None` for payments initiated on LDK versions prior to 0.0.103. + /// /// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees fee_paid_msat: Option, }, @@@ -2059,12 -2057,12 +2059,12 @@@ pub enum MessageSendEvent /// The message which should be sent. msg: msgs::Stfu, }, - /// Used to indicate that a splice message should be sent to the peer with the given node id. - SendSplice { + /// Used to indicate that a splice_init message should be sent to the peer with the given node id. + SendSpliceInit { /// The node_id of the node which should receive this message node_id: PublicKey, /// The message which should be sent. - msg: msgs::Splice, + msg: msgs::SpliceInit, }, /// Used to indicate that a splice_ack message should be sent to the peer with the given node id. SendSpliceAck { diff --combined lightning/src/ln/channel.rs index 5415eb2f,a9f8ff00..4d166cd6 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@@ -2118,8 -2118,8 +2118,8 @@@ impl ChannelContext wher /// Returns the holder signer for this channel. #[cfg(test)] - pub fn get_signer(&self) -> &ChannelSignerType { - return &self.holder_signer + pub fn get_mut_signer(&mut self) -> &mut ChannelSignerType { + return &mut self.holder_signer } /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0, @@@ -2146,143 -2146,6 +2146,143 @@@ } } + /// Performs checks against necessary constraints after receiving either an `accept_channel` or + /// `accept_channel2` message. + pub fn do_accept_channel_checks( + &mut self, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures, + common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64, + ) -> Result<(), ChannelError> { + let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits }; + + // Check sanity of message fields: + if !self.is_outbound() { + return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned())); + } + if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) { + return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned())); + } + if common_fields.dust_limit_satoshis > 21000000 * 100000000 { + return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis))); + } + if channel_reserve_satoshis > self.channel_value_satoshis { + return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). 
Must not be greater than ({})", channel_reserve_satoshis, self.channel_value_satoshis))); + } + if common_fields.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis))); + } + if channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", + channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis))); + } + let full_channel_value_msat = (self.channel_value_satoshis - channel_reserve_satoshis) * 1000; + if common_fields.htlc_minimum_msat >= full_channel_value_msat { + return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat))); + } + let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); + if common_fields.to_self_delay > max_delay_acceptable { + return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay))); + } + if common_fields.max_accepted_htlcs < 1 { + return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + } + if common_fields.max_accepted_htlcs > MAX_HTLCS { + return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", common_fields.max_accepted_htlcs, MAX_HTLCS))); + } + + // Now check against optional parameters as set by config... 
+ if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { + return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); + } + if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { + return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); + } + if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { + return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); + } + if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { + return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); + } + if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + } + if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + } + if common_fields.minimum_depth > peer_limits.max_minimum_depth { + return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth))); + } + + if let Some(ty) = &common_fields.channel_type { + if *ty != self.channel_type { + return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); + } + } else if their_features.supports_channel_type() { + // Assume they've accepted the channel type as they said they understand it. + } else { + let channel_type = ChannelTypeFeatures::from_init(&their_features); + if channel_type != ChannelTypeFeatures::only_static_remote_key() { + return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); + } + self.channel_type = channel_type.clone(); + self.channel_transaction_parameters.channel_type_features = channel_type; + } + + let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { + match &common_fields.shutdown_scriptpubkey { + &Some(ref script) => { + // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything + if script.len() == 0 { + None + } else { + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); + } + Some(script.clone()) + } + }, + // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel + &None => { + return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); + } + } + } else { None }; + + self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis; + self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000); + self.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis); + self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat; + self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs; + + if peer_limits.trust_own_funding_0conf { + self.minimum_depth = Some(common_fields.minimum_depth); + } else { + self.minimum_depth = Some(cmp::max(1, common_fields.minimum_depth)); + } + + let counterparty_pubkeys = ChannelPublicKeys { + funding_pubkey: common_fields.funding_pubkey, + revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint), + payment_point: common_fields.payment_basepoint, + delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint), + htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint) + }; + + self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { + selected_contest_delay: common_fields.to_self_delay, + pubkeys: counterparty_pubkeys, + }); + + self.counterparty_cur_commitment_point = Some(common_fields.first_per_commitment_point); + self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; + + self.channel_state = ChannelState::NegotiatingFunding( + NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT + ); + self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. + + Ok(()) + } + /// Returns the block hash in which our funding transaction was confirmed. pub fn get_funding_tx_confirmed_in(&self) -> Option { self.funding_tx_confirmed_in @@@ -2768,7 -2631,7 +2768,7 @@@ feerate_per_kw = cmp::max(feerate_per_kw, feerate); } let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000); - cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value())) + cmp::max(feerate_per_kw.saturating_add(2530), feerate_plus_quarter.unwrap_or(u32::MAX)) } /// Get forwarding information for the counterparty. @@@ -7272,6 -7135,7 +7272,7 @@@ impl Channel wher channel_id: self.context.channel_id, signature, htlc_signatures, + batch: None, #[cfg(taproot)] partial_signature_with_nonce: None, }, (counterparty_commitment_txid, commitment_stats.htlcs_included))) @@@ -7634,11 -7498,136 +7635,11 @@@ impl OutboundV1Channel w } // Message handlers - pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { - let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; - - // Check sanity of message fields: - if !self.context.is_outbound() { - return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned())); - } - if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) { - return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned())); - } - if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 { - return Err(ChannelError::close(format!("Peer never wants payout outputs? 
dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis { - return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis))); - } - if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", - msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis))); - } - let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; - if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat))); - } - let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); - if msg.common_fields.to_self_delay > max_delay_acceptable { - return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay))); - } - if msg.common_fields.max_accepted_htlcs < 1 { - return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned())); - } - if msg.common_fields.max_accepted_htlcs > MAX_HTLCS { - return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS))); - } - - // Now check against optional parameters as set by config... 
- if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { - return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); - } - if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); - } - if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { - return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); - } - if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { - return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); - } - if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth { - return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth))); - } - - if let Some(ty) = &msg.common_fields.channel_type { - if *ty != self.context.channel_type { - return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); - } - } else if their_features.supports_channel_type() { - // Assume they've accepted the channel type as they said they understand it. - } else { - let channel_type = ChannelTypeFeatures::from_init(&their_features); - if channel_type != ChannelTypeFeatures::only_static_remote_key() { - return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); - } - self.context.channel_type = channel_type.clone(); - self.context.channel_transaction_parameters.channel_type_features = channel_type; - } - - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &msg.common_fields.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); - } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); - } - } - } else { None }; - - self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis; - self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000); - self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis); - self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat; - self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs; - - if peer_limits.trust_own_funding_0conf { - self.context.minimum_depth = Some(msg.common_fields.minimum_depth); - } else { - self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth)); - } - - let counterparty_pubkeys = ChannelPublicKeys { - funding_pubkey: msg.common_fields.funding_pubkey, - revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint), - payment_point: msg.common_fields.payment_basepoint, - delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint), - htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint) - }; - - self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: msg.common_fields.to_self_delay, - pubkeys: counterparty_pubkeys, - }); - - self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point); - self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; - - self.context.channel_state = ChannelState::NegotiatingFunding( - NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT - ); - self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. - - Ok(()) + pub fn accept_channel( + &mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, + their_features: &InitFeatures + ) -> Result<(), ChannelError> { + self.context.do_accept_channel_checks(default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis) } /// Handles a funding_signed message from the remote end. 
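As the events/mod.rs hunk above shows, the splice open message and its send event were renamed: consumers that drain MessageSendEvents now match SendSpliceInit carrying msgs::SpliceInit instead of SendSplice carrying msgs::Splice. A minimal sketch of an updated consumer, assuming a hypothetical send_to_peer delivery callback that is not part of this patch:

use lightning::events::MessageSendEvent;
use lightning::ln::msgs;
use bitcoin::secp256k1::PublicKey;

// `send_to_peer` is a hypothetical stand-in for however the caller hands a
// wire message to its peer connection; it is not an API added by this patch.
fn forward_splice_events(
    events: Vec<MessageSendEvent>,
    mut send_to_peer: impl FnMut(&PublicKey, &msgs::SpliceInit),
) {
    for event in events {
        match event {
            // Renamed from SendSplice; the payload type is now msgs::SpliceInit.
            MessageSendEvent::SendSpliceInit { node_id, msg } => send_to_peer(&node_id, &msg),
            // Other events (including SendSpliceAck and SendSpliceLocked) are
            // unchanged in shape and handled elsewhere.
            _ => {},
        }
    }
}

SendSpliceAck and SendSpliceLocked keep their existing shapes, so only the renamed arm needs touching.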
diff --combined lightning/src/ln/channelmanager.rs index d4279d89,ea5150cb..d3550d10 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@@ -66,7 -66,6 +66,7 @@@ use crate::offers::invoice_request::{De use crate::offers::offer::{Offer, OfferBuilder}; use crate::offers::parse::Bolt12SemanticError; use crate::offers::refund::{Refund, RefundBuilder}; +use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler}; use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction}; use crate::onion_message::offers::{OffersMessage, OffersMessageHandler}; use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider}; @@@ -4032,8 -4031,8 +4032,8 @@@ wher self.pending_outbound_payments .send_payment_for_bolt12_invoice( invoice, payment_id, &self.router, self.list_usable_channels(), - || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, - best_block_height, &self.logger, &self.pending_events, + || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self, + &self.secp_ctx, best_block_height, &self.logger, &self.pending_events, |args| self.send_payment_along_path(args) ) } @@@ -4896,8 -4895,8 +4896,8 @@@ if short_chan_id != 0 { let mut forwarding_counterparty = None; macro_rules! forwarding_channel_not_found { - () => { - for forward_info in pending_forwards.drain(..) { + ($forward_infos: expr) => { + for forward_info in $forward_infos { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, @@@ -5005,7 -5004,7 +5005,7 @@@ let (counterparty_node_id, forward_chan_id) = match chan_info_opt { Some((cp_id, chan_id)) => (cp_id, chan_id), None => { - forwarding_channel_not_found!(); + forwarding_channel_not_found!(pending_forwards.drain(..)); continue; } }; @@@ -5013,148 -5012,96 +5013,148 @@@ let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); if peer_state_mutex_opt.is_none() { - forwarding_channel_not_found!(); + forwarding_channel_not_found!(pending_forwards.drain(..)); continue; } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); - for forward_info in pending_forwards.drain(..) { - let queue_fail_htlc_res = match forward_info { - HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, - prev_user_channel_id, forward_info: PendingHTLCInfo { - incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, - routing: PendingHTLCRouting::Forward { - onion_packet, blinded, .. - }, skimmed_fee_msat, .. 
+ let mut draining_pending_forwards = pending_forwards.drain(..); + while let Some(forward_info) = draining_pending_forwards.next() { + let queue_fail_htlc_res = match forward_info { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { + incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, + routing: PendingHTLCRouting::Forward { + ref onion_packet, blinded, .. + }, skimmed_fee_msat, .. + }, + }) => { + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + // Phantom payments are only PendingHTLCRouting::Receive. + phantom_shared_secret: None, + blinded_failure: blinded.map(|b| b.failure), + }); + let next_blinding_point = blinded.and_then(|b| { + let encrypted_tlvs_ss = self.node_signer.ecdh( + Recipient::Node, &b.inbound_blinding_point, None + ).unwrap().secret_bytes(); + onion_utils::next_hop_pubkey( + &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss + ).ok() + }); + + // Forward the HTLC over the most appropriate channel with the corresponding peer, + // applying non-strict forwarding. + // The channel with the least amount of outbound liquidity will be used to maximize the + // probability of being able to successfully forward a subsequent HTLC. + let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase { + ChannelPhase::Funded(chan) => { + let balances = chan.context.get_available_balances(&self.fee_estimator); + if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat && + outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat && + chan.context.is_usable() { + Some((chan, balances)) + } else { + None + } }, - }) => { - let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash)); - log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - user_channel_id: Some(prev_user_channel_id), - channel_id: prev_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - // Phantom payments are only PendingHTLCRouting::Receive. 
- phantom_shared_secret: None, - blinded_failure: blinded.map(|b| b.failure), - }); - let next_blinding_point = blinded.and_then(|b| { - let encrypted_tlvs_ss = self.node_signer.ecdh( - Recipient::Node, &b.inbound_blinding_point, None - ).unwrap().secret_bytes(); - onion_utils::next_hop_pubkey( - &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss - ).ok() - }); - if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat, - payment_hash, outgoing_cltv_value, htlc_source.clone(), - onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator, - &&logger) - { - if let ChannelError::Ignore(msg) = e { - log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg); + _ => None, + }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c); + let optimal_channel = match maybe_optimal_channel { + Some(chan) => chan, + None => { + // Fall back to the specified channel to return an appropriate error. + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + chan } else { - panic!("Stated return value requirements in send_htlc() were not met"); + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; } + } + }; + + let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash)); + let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) { + "specified" + } else { + "alternate" + }; + log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}", + prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id); + if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat, + payment_hash, outgoing_cltv_value, htlc_source.clone(), + onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator, + &&logger) + { + if let ChannelError::Ignore(msg) = e { + log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg); + } else { + panic!("Stated return value requirements in send_htlc() were not met"); + } + + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(failure_code, data), HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } )); - continue; + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; } - None - }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); - }, - HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { + } + None + }, + HTLCForwardInfo::AddHTLC { .. 
} => { + panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); + }, + HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id)) - }, - HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { + Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id)) + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; + } + }, + HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); let res = chan.queue_fail_malformed_htlc( htlc_id, failure_code, sha256_of_onion, &&logger ); Some((res, htlc_id)) - }, - }; - if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res { - if let Err(e) = queue_fail_htlc_res { - if let ChannelError::Ignore(msg) = e { + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; + } + }, + }; + if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res { + if let Err(e) = queue_fail_htlc_res { + if let ChannelError::Ignore(msg) = e { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); - } else { - panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met"); } - // fail-backs are best-effort, we probably already have one - // pending, and if not that's OK, if not, the channel is on - // the chain and sending the HTLC-Timeout is their problem. - continue; + } else { + panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met"); } + // fail-backs are best-effort, we probably already have one + // pending, and if not that's OK, if not, the channel is on + // the chain and sending the HTLC-Timeout is their problem. + continue; } } - } else { - forwarding_channel_not_found!(); - continue; } } else { 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) { @@@ -6212,13 -6159,21 +6212,13 @@@ } if valid_mpp { for htlc in sources.drain(..) { - let prev_hop_chan_id = htlc.prev_hop.channel_id; - if let Err((pk, err)) = self.claim_funds_from_hop( + self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, |_, definitely_duplicate| { debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment"); Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }) } - ) { - if let msgs::ErrorAction::IgnoreError = err.err.action { - // We got a temporary failure updating monitor, but will claim the - // HTLC when the monitor updating is restored (or on chain). 
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash)); - log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); - } else { errs.push((pk, err)); } - } + ); } } if !valid_mpp { @@@ -6240,10 -6195,9 +6240,10 @@@ } } - fn claim_funds_from_hop, bool) -> Option>(&self, - prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) - -> Result<(), (PublicKey, MsgHandleErrInternal)> { + fn claim_funds_from_hop, bool) -> Option>( + &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, + completion_action: ComplFunc, + ) { //TODO: Delay the claimed_funds relaying just like we do outbound relay! // If we haven't yet run background events assume we're still deserializing and shouldn't @@@ -6305,7 -6259,7 +6305,7 @@@ let action = if let Some(action) = completion_action(None, true) { action } else { - return Ok(()); + return; }; mem::drop(peer_state_lock); @@@ -6321,7 -6275,7 +6321,7 @@@ } else { debug_assert!(false, "Duplicate claims should always free another channel immediately"); - return Ok(()); + return; }; if let Some(peer_state_mtx) = per_peer_state.get(&node_id) { let mut peer_state = peer_state_mtx.lock().unwrap(); @@@ -6346,7 -6300,7 +6346,7 @@@ } } } - return Ok(()); + return; } } } @@@ -6394,6 -6348,7 +6394,6 @@@ // generally always allowed to be duplicative (and it's specifically noted in // `PaymentForwarded`). self.handle_monitor_update_completion_actions(completion_action(None, false)); - Ok(()) } fn finalize_claims(&self, sources: Vec) { @@@ -6426,7 -6381,7 +6426,7 @@@ let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); #[cfg(debug_assertions)] let claiming_chan_funding_outpoint = hop_data.outpoint; - let res = self.claim_funds_from_hop(hop_data, payment_preimage, + self.claim_funds_from_hop(hop_data, payment_preimage, |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = if let Some(node_id) = next_channel_counterparty_node_id { @@@ -6520,6 -6475,10 +6520,6 @@@ }) } }); - if let Err((pk, err)) = res { - let result: Result<(), _> = Err(err); - let _ = handle_error!(self, result, pk); - } }, } } @@@ -9693,7 -9652,7 +9693,7 @@@ wher } #[cfg(splicing)] - fn handle_splice(&self, counterparty_node_id: &PublicKey, msg: &msgs::Splice) { + fn handle_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) { let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close( "Splicing not supported".to_owned(), msg.channel_id.clone())), *counterparty_node_id); @@@ -9900,7 -9859,7 +9900,7 @@@ // Quiescence &events::MessageSendEvent::SendStfu { .. } => false, // Splicing - &events::MessageSendEvent::SendSplice { .. } => false, + &events::MessageSendEvent::SendSpliceInit { .. } => false, &events::MessageSendEvent::SendSpliceAck { .. } => false, &events::MessageSendEvent::SendSpliceLocked { .. 
} => false, // Interactive Transaction Construction @@@ -10378,17 -10337,6 +10378,17 @@@ wher }, } }, + #[cfg(async_payments)] + OffersMessage::StaticInvoice(_invoice) => { + match responder { + Some(responder) => { + responder.respond(OffersMessage::InvoiceError( + InvoiceError::from_string("Static invoices not yet supported".to_string()) + )) + }, + None => return ResponseInstruction::NoResponse, + } + }, OffersMessage::InvoiceError(invoice_error) => { log_trace!(self.logger, "Received invoice_error: {}", invoice_error); ResponseInstruction::NoResponse @@@ -10401,31 -10349,6 +10401,31 @@@ } } +impl +AsyncPaymentsMessageHandler for ChannelManager +where + M::Target: chain::Watch<::EcdsaSigner>, + T::Target: BroadcasterInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, + F::Target: FeeEstimator, + R::Target: Router, + L::Target: Logger, +{ + fn held_htlc_available( + &self, _message: HeldHtlcAvailable, _responder: Option + ) -> ResponseInstruction { + ResponseInstruction::NoResponse + } + + fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {} + + fn release_pending_messages(&self) -> Vec> { + Vec::new() + } +} + impl NodeIdLookUp for ChannelManager where diff --combined lightning/src/ln/functional_test_utils.rs index ab95c0fe,cfec6fb1..7f1ac422 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@@ -31,8 -31,6 +31,8 @@@ use crate::util::errors::APIError use crate::util::logger::Logger; use crate::util::scid_utils; use crate::util::test_channel_signer::TestChannelSigner; +#[cfg(test)] +use crate::util::test_channel_signer::SignerOp; use crate::util::test_utils; use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface}; use crate::util::ser::{ReadableArgs, Writeable}; @@@ -422,7 -420,6 +422,7 @@@ type TestOnionMessenger<'chan_man, 'nod &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>, &'node_cfg test_utils::TestMessageRouter<'chan_mon_cfg>, &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>, + &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>, IgnoringMessageHandler, >; @@@ -485,74 -482,46 +485,74 @@@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> pub fn get_block_header(&self, height: u32) -> Header { self.blocks.lock().unwrap()[height as usize].0.header } - /// Changes the channel signer's availability for the specified peer and channel. + + /// Toggles this node's signer to be available for the given signer operation. + /// This is useful for testing behavior for restoring an async signer that previously + /// could not return a signature immediately. + #[cfg(test)] + pub fn enable_channel_signer_op(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp) { + self.set_channel_signer_ops(peer_id, chan_id, signer_op, true); + } + + /// Toggles this node's signer to be unavailable, returning `Err` for the given signer operation. + /// This is useful for testing behavior for an async signer that cannot return a signature + /// immediately. + #[cfg(test)] + pub fn disable_channel_signer_op(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp) { + self.set_channel_signer_ops(peer_id, chan_id, signer_op, false); + } + + /// Changes the channel signer's availability for the specified peer, channel, and signer + /// operation. /// - /// When `available` is set to `true`, the channel signer will behave normally. 
When set to - /// `false`, the channel signer will act like an off-line remote signer and will return `Err` for - /// several of the signing methods. Currently, only `get_per_commitment_point` and - /// `release_commitment_secret` are affected by this setting. + /// For the specified signer operation, when `available` is set to `true`, the channel signer + /// will behave normally, returning `Ok`. When set to `false`, and the channel signer will + /// act like an off-line remote signer, returning `Err`. This applies to the signer in all + /// relevant places, i.e. the channel manager, chain monitor, and the keys manager. #[cfg(test)] - pub fn set_channel_signer_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, available: bool) { + fn set_channel_signer_ops(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp, available: bool) { use crate::sign::ChannelSigner; log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available); let per_peer_state = self.node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap(); + let mut chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap(); let mut channel_keys_id = None; - if let Some(chan) = chan_lock.channel_by_id.get(chan_id).map(|phase| phase.context()) { - chan.get_signer().as_ecdsa().unwrap().set_available(available); + if let Some(chan) = chan_lock.channel_by_id.get_mut(chan_id).map(|phase| phase.context_mut()) { + let signer = chan.get_mut_signer().as_mut_ecdsa().unwrap(); + if available { + signer.enable_op(signer_op); + } else { + signer.disable_op(signer_op); + } channel_keys_id = Some(chan.channel_keys_id); } - let mut monitor = None; - for (funding_txo, channel_id) in self.chain_monitor.chain_monitor.list_monitors() { - if *chan_id == channel_id { - monitor = self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok(); - } - } + let monitor = self.chain_monitor.chain_monitor.list_monitors().into_iter() + .find(|(_, channel_id)| *channel_id == *chan_id) + .and_then(|(funding_txo, _)| self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok()); if let Some(monitor) = monitor { - monitor.do_signer_call(|signer| { + monitor.do_mut_signer_call(|signer| { channel_keys_id = channel_keys_id.or(Some(signer.inner.channel_keys_id())); - signer.set_available(available) + if available { + signer.enable_op(signer_op); + } else { + signer.disable_op(signer_op); + } }); } + let channel_keys_id = channel_keys_id.unwrap(); + let mut unavailable_signers_ops = self.keys_manager.unavailable_signers_ops.lock().unwrap(); + let entry = unavailable_signers_ops.entry(channel_keys_id).or_insert(new_hash_set()); if available { - self.keys_manager.unavailable_signers.lock().unwrap() - .remove(channel_keys_id.as_ref().unwrap()); + entry.remove(&signer_op); + if entry.is_empty() { + unavailable_signers_ops.remove(&channel_keys_id); + } } else { - self.keys_manager.unavailable_signers.lock().unwrap() - .insert(channel_keys_id.unwrap()); - } + entry.insert(signer_op); + }; } } @@@ -915,7 -884,7 +915,7 @@@ pub fn remove_first_msg_event_to_node(m MessageSendEvent::SendStfu { node_id, .. } => { node_id == msg_node_id }, - MessageSendEvent::SendSplice { node_id, .. } => { + MessageSendEvent::SendSpliceInit { node_id, .. } => { node_id == msg_node_id }, MessageSendEvent::SendSpliceAck { node_id, .. 
} => { @@@ -3259,7 -3228,7 +3259,7 @@@ pub fn create_network<'a, 'b: 'a, 'c: ' let dedicated_entropy = DedicatedEntropy(RandomBytes::new([i as u8; 32])); let onion_messenger = OnionMessenger::new( dedicated_entropy, cfgs[i].keys_manager, cfgs[i].logger, &chan_mgrs[i], - &cfgs[i].message_router, &chan_mgrs[i], IgnoringMessageHandler {}, + &cfgs[i].message_router, &chan_mgrs[i], &chan_mgrs[i], IgnoringMessageHandler {}, ); let gossip_sync = P2PGossipSync::new(cfgs[i].network_graph.as_ref(), None, cfgs[i].logger); let wallet_source = Arc::new(test_utils::TestWalletSource::new(SecretKey::from_slice(&[i as u8 + 1; 32]).unwrap())); diff --combined lightning/src/ln/interactivetxs.rs index 947b9a48,653abdbc..b6ed64aa --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@@ -9,14 -9,17 +9,14 @@@ use crate::io_extras::sink; use crate::prelude::*; -use core::ops::Deref; +use bitcoin::absolute::LockTime as AbsoluteLockTime; use bitcoin::amount::Amount; use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; use bitcoin::consensus::Encodable; use bitcoin::policy::MAX_STANDARD_TX_WEIGHT; use bitcoin::transaction::Version; -use bitcoin::{ - absolute::LockTime as AbsoluteLockTime, OutPoint, ScriptBuf, Sequence, Transaction, TxIn, - TxOut, Weight, -}; +use bitcoin::{OutPoint, ScriptBuf, Sequence, Transaction, TxIn, TxOut, Weight}; use crate::chain::chaininterface::fee_for_weight; use crate::events::bump_transaction::{BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT}; @@@ -27,8 -30,6 +27,8 @@@ use crate::ln::types::ChannelId use crate::sign::{EntropySource, P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; use crate::util::ser::TransactionU16LenLimited; +use core::ops::Deref; + /// The number of received `tx_add_input` messages during a negotiation at which point the /// negotiation MUST be failed. const MAX_RECEIVED_TX_ADD_INPUT_COUNT: u16 = 4096; @@@ -98,14 -99,19 +98,14 @@@ pub(crate) enum AbortReason InsufficientFees, OutputsValueExceedsInputsValue, InvalidTx, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct InteractiveTxInput { - serial_id: SerialId, - input: TxIn, - prev_output: TxOut, -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct InteractiveTxOutput { - serial_id: SerialId, - tx_out: TxOut, + /// No funding (shared) output found. + MissingFundingOutput, + /// More than one funding (shared) output found. + DuplicateFundingOutput, + /// The intended local part of the funding output is higher than the actual shared funding output, + /// if funding output is provided by the peer this is an interop error, + /// if provided by the same node than internal input consistency error. 
+ InvalidLowFundingOutputValue, } #[derive(Debug, Clone, PartialEq, Eq)] @@@ -129,12 -135,18 +129,12 @@@ impl ConstructedTransaction let local_inputs_value_satoshis = context .inputs .iter() - .filter(|(serial_id, _)| { - !is_serial_id_valid_for_counterparty(context.holder_is_initiator, serial_id) - }) - .fold(0u64, |value, (_, input)| value.saturating_add(input.prev_output.value.to_sat())); + .fold(0u64, |value, (_, input)| value.saturating_add(input.local_value())); let local_outputs_value_satoshis = context .outputs .iter() - .filter(|(serial_id, _)| { - !is_serial_id_valid_for_counterparty(context.holder_is_initiator, serial_id) - }) - .fold(0u64, |value, (_, output)| value.saturating_add(output.tx_out.value.to_sat())); + .fold(0u64, |value, (_, output)| value.saturating_add(output.local_value())); Self { holder_is_initiator: context.holder_is_initiator, @@@ -153,12 -165,18 +153,12 @@@ } pub fn weight(&self) -> Weight { - let inputs_weight = self.inputs.iter().fold( - Weight::from_wu(0), - |weight, InteractiveTxInput { prev_output, .. }| { - weight.checked_add(estimate_input_weight(prev_output)).unwrap_or(Weight::MAX) - }, - ); - let outputs_weight = self.outputs.iter().fold( - Weight::from_wu(0), - |weight, InteractiveTxOutput { tx_out, .. }| { - weight.checked_add(get_output_weight(&tx_out.script_pubkey)).unwrap_or(Weight::MAX) - }, - ); + let inputs_weight = self.inputs.iter().fold(Weight::from_wu(0), |weight, input| { + weight.checked_add(estimate_input_weight(input.prev_output())).unwrap_or(Weight::MAX) + }); + let outputs_weight = self.outputs.iter().fold(Weight::from_wu(0), |weight, output| { + weight.checked_add(get_output_weight(&output.script_pubkey())).unwrap_or(Weight::MAX) + }); Weight::from_wu(TX_COMMON_FIELDS_WEIGHT) .checked_add(inputs_weight) .and_then(|weight| weight.checked_add(outputs_weight)) @@@ -169,12 -187,13 +169,12 @@@ // Inputs and outputs must be sorted by serial_id let ConstructedTransaction { mut inputs, mut outputs, .. } = self; - inputs.sort_unstable_by_key(|InteractiveTxInput { serial_id, .. }| *serial_id); - outputs.sort_unstable_by_key(|InteractiveTxOutput { serial_id, .. }| *serial_id); + inputs.sort_unstable_by_key(|input| input.serial_id()); + outputs.sort_unstable_by_key(|output| output.serial_id); - let input: Vec = - inputs.into_iter().map(|InteractiveTxInput { input, .. }| input).collect(); + let input: Vec = inputs.into_iter().map(|input| input.txin().clone()).collect(); let output: Vec = - outputs.into_iter().map(|InteractiveTxOutput { tx_out, .. }| tx_out).collect(); + outputs.into_iter().map(|output| output.tx_out().clone()).collect(); Transaction { version: Version::TWO, lock_time: self.lock_time, input, output } } @@@ -186,25 -205,9 +186,25 @@@ struct NegotiationContext received_tx_add_input_count: u16, received_tx_add_output_count: u16, inputs: HashMap, + /// The output script intended to be the new funding output script. + /// The script pubkey is used to determine which output is the funding output. + /// When an output with the same script pubkey is added by any of the nodes, it will be + /// treated as the shared output. + /// The value is the holder's intended contribution to the shared funding output. + /// The rest is the counterparty's contribution. + /// When the funding output is added (recognized by its output script pubkey), it will be marked + /// as shared, and split between the peers according to the local value. + /// If the local value is found to be larger than the actual funding output, an error is generated. 
+ expected_shared_funding_output: (ScriptBuf, u64), + /// The actual new funding output, set only after the output has actually been added. + /// NOTE: this output is also included in `outputs`. + actual_new_funding_output: Option, prevtx_outpoints: HashSet, + /// The outputs added so far. outputs: HashMap, + /// The locktime of the funding transaction. tx_locktime: AbsoluteLockTime, + /// The fee rate used for the transaction feerate_sat_per_kw: u32, } @@@ -233,51 -236,26 +233,51 @@@ fn is_serial_id_valid_for_counterparty( } impl NegotiationContext { + fn new( + holder_is_initiator: bool, expected_shared_funding_output: (ScriptBuf, u64), + tx_locktime: AbsoluteLockTime, feerate_sat_per_kw: u32, + ) -> Self { + NegotiationContext { + holder_is_initiator, + received_tx_add_input_count: 0, + received_tx_add_output_count: 0, + inputs: new_hash_map(), + expected_shared_funding_output, + actual_new_funding_output: None, + prevtx_outpoints: new_hash_set(), + outputs: new_hash_map(), + tx_locktime, + feerate_sat_per_kw, + } + } + + fn set_actual_new_funding_output( + &mut self, tx_out: TxOut, + ) -> Result { + if self.actual_new_funding_output.is_some() { + return Err(AbortReason::DuplicateFundingOutput); + } + let value = tx_out.value.to_sat(); + let local_owned = self.expected_shared_funding_output.1; + // Sanity check + if local_owned > value { + return Err(AbortReason::InvalidLowFundingOutputValue); + } + let shared_output = SharedOwnedOutput::new(tx_out, local_owned); + self.actual_new_funding_output = Some(shared_output.clone()); + Ok(shared_output) + } + fn is_serial_id_valid_for_counterparty(&self, serial_id: &SerialId) -> bool { is_serial_id_valid_for_counterparty(self.holder_is_initiator, serial_id) } fn remote_inputs_value(&self) -> u64 { - self.inputs - .iter() - .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id)) - .fold(0u64, |acc, (_, InteractiveTxInput { prev_output, .. })| { - acc.saturating_add(prev_output.value.to_sat()) - }) + self.inputs.iter().fold(0u64, |acc, (_, input)| acc.saturating_add(input.remote_value())) } fn remote_outputs_value(&self) -> u64 { - self.outputs - .iter() - .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id)) - .fold(0u64, |acc, (_, InteractiveTxOutput { tx_out, .. })| { - acc.saturating_add(tx_out.value.to_sat()) - }) + self.outputs.iter().fold(0u64, |acc, (_, output)| acc.saturating_add(output.remote_value())) } fn remote_inputs_weight(&self) -> Weight { @@@ -285,8 -263,8 +285,8 @@@ self.inputs .iter() .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id)) - .fold(0u64, |weight, (_, InteractiveTxInput { prev_output, .. })| { - weight.saturating_add(estimate_input_weight(prev_output).to_wu()) + .fold(0u64, |weight, (_, input)| { + weight.saturating_add(estimate_input_weight(input.prev_output()).to_wu()) }), ) } @@@ -296,8 -274,8 +296,8 @@@ self.outputs .iter() .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id)) - .fold(0u64, |weight, (_, InteractiveTxOutput { tx_out, .. 
})| { - weight.saturating_add(get_output_weight(&tx_out.script_pubkey).to_wu()) + .fold(0u64, |weight, (_, output)| { + weight.saturating_add(get_output_weight(&output.script_pubkey()).to_wu()) }), ) } @@@ -369,7 -347,7 +369,7 @@@ }, hash_map::Entry::Vacant(entry) => { let prev_outpoint = OutPoint { txid, vout: msg.prevtx_out }; - entry.insert(InteractiveTxInput { + entry.insert(InteractiveTxInput::Remote(LocalOrRemoteInput { serial_id: msg.serial_id, input: TxIn { previous_output: prev_outpoint, @@@ -377,7 -355,7 +377,7 @@@ ..Default::default() }, prev_output: prev_out, - }); + })); self.prevtx_outpoints.insert(prev_outpoint); Ok(()) }, @@@ -426,7 -404,7 +426,7 @@@ // bitcoin supply. let mut outputs_value: u64 = 0; for output in self.outputs.iter() { - outputs_value = outputs_value.saturating_add(output.1.tx_out.value.to_sat()); + outputs_value = outputs_value.saturating_add(output.1.value()); } if outputs_value.saturating_add(msg.sats) > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // The receiving node: @@@ -455,23 -433,6 +455,23 @@@ return Err(AbortReason::InvalidOutputScript); } + let txout = TxOut { value: Amount::from_sat(msg.sats), script_pubkey: msg.script.clone() }; + let is_shared = msg.script == self.expected_shared_funding_output.0; + let output = if is_shared { + // this is a shared funding output + let shared_output = self.set_actual_new_funding_output(txout)?; + InteractiveTxOutput { + serial_id: msg.serial_id, + added_by: AddingRole::Remote, + output: OutputOwned::Shared(shared_output), + } + } else { + InteractiveTxOutput { + serial_id: msg.serial_id, + added_by: AddingRole::Remote, + output: OutputOwned::Single(txout), + } + }; match self.outputs.entry(msg.serial_id) { hash_map::Entry::Occupied(_) => { // The receiving node: @@@ -480,7 -441,13 +480,7 @@@ Err(AbortReason::DuplicateSerialId) }, hash_map::Entry::Vacant(entry) => { - entry.insert(InteractiveTxOutput { - serial_id: msg.serial_id, - tx_out: TxOut { - value: Amount::from_sat(msg.sats), - script_pubkey: msg.script.clone(), - }, - }); + entry.insert(output); Ok(()) }, } @@@ -503,45 -470,35 +503,45 @@@ fn sent_tx_add_input(&mut self, msg: &msgs::TxAddInput) -> Result<(), AbortReason> { let tx = msg.prevtx.as_transaction(); - let input = TxIn { + let txin = TxIn { previous_output: OutPoint { txid: tx.txid(), vout: msg.prevtx_out }, sequence: Sequence(msg.sequence), ..Default::default() }; - let prev_output = - tx.output.get(msg.prevtx_out as usize).ok_or(AbortReason::PrevTxOutInvalid)?.clone(); - if !self.prevtx_outpoints.insert(input.previous_output) { + if !self.prevtx_outpoints.insert(txin.previous_output.clone()) { // We have added an input that already exists return Err(AbortReason::PrevTxOutInvalid); } - self.inputs.insert( - msg.serial_id, - InteractiveTxInput { serial_id: msg.serial_id, input, prev_output }, - ); + let vout = txin.previous_output.vout as usize; + let prev_output = tx.output.get(vout).ok_or(AbortReason::PrevTxOutInvalid)?.clone(); + let input = InteractiveTxInput::Local(LocalOrRemoteInput { + serial_id: msg.serial_id, + input: txin, + prev_output, + }); + self.inputs.insert(msg.serial_id, input); Ok(()) } fn sent_tx_add_output(&mut self, msg: &msgs::TxAddOutput) -> Result<(), AbortReason> { - self.outputs.insert( - msg.serial_id, + let txout = TxOut { value: Amount::from_sat(msg.sats), script_pubkey: msg.script.clone() }; + let is_shared = msg.script == self.expected_shared_funding_output.0; + let output = if is_shared { + // this is a shared funding output + let shared_output = 
self.set_actual_new_funding_output(txout)?; InteractiveTxOutput { serial_id: msg.serial_id, - tx_out: TxOut { - value: Amount::from_sat(msg.sats), - script_pubkey: msg.script.clone(), - }, - }, - ); + added_by: AddingRole::Local, + output: OutputOwned::Shared(shared_output), + } + } else { + InteractiveTxOutput { + serial_id: msg.serial_id, + added_by: AddingRole::Local, + output: OutputOwned::Single(txout), + } + }; + self.outputs.insert(msg.serial_id, output); Ok(()) } @@@ -597,10 -554,6 +597,10 @@@ return Err(AbortReason::ExceededNumberOfInputsOrOutputs); } + if self.actual_new_funding_output.is_none() { + return Err(AbortReason::MissingFundingOutput); + } + // - the peer's paid feerate does not meet or exceed the agreed feerate (based on the minimum fee). self.check_counterparty_fees(remote_inputs_value.saturating_sub(remote_outputs_value))?; @@@ -809,16 -762,17 +809,16 @@@ macro_rules! define_state_machine_trans } impl StateMachine { - fn new(feerate_sat_per_kw: u32, is_initiator: bool, tx_locktime: AbsoluteLockTime) -> Self { - let context = NegotiationContext { + fn new( + feerate_sat_per_kw: u32, is_initiator: bool, tx_locktime: AbsoluteLockTime, + expected_shared_funding_output: (ScriptBuf, u64), + ) -> Self { + let context = NegotiationContext::new( + is_initiator, + expected_shared_funding_output, tx_locktime, - holder_is_initiator: is_initiator, - received_tx_add_input_count: 0, - received_tx_add_output_count: 0, - inputs: new_hash_map(), - prevtx_outpoints: new_hash_set(), - outputs: new_hash_map(), feerate_sat_per_kw, - }; + ); if is_initiator { Self::ReceivedChangeMsg(ReceivedChangeMsg(context)) } else { @@@ -877,182 -831,11 +877,182 @@@ ]); } +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum AddingRole { + Local, + Remote, +} + +/// Represents an input -- local or remote (both have the same fields) +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct LocalOrRemoteInput { + serial_id: SerialId, + input: TxIn, + prev_output: TxOut, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum InteractiveTxInput { + Local(LocalOrRemoteInput), + Remote(LocalOrRemoteInput), + // TODO(splicing) SharedInput should be added +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SharedOwnedOutput { + tx_out: TxOut, + local_owned: u64, +} + +impl SharedOwnedOutput { + fn new(tx_out: TxOut, local_owned: u64) -> SharedOwnedOutput { + debug_assert!( + local_owned <= tx_out.value.to_sat(), + "SharedOwnedOutput: Inconsistent local_owned value {}, larger than output value {}", + local_owned, + tx_out.value + ); + SharedOwnedOutput { tx_out, local_owned } + } + + fn remote_owned(&self) -> u64 { + self.tx_out.value.to_sat().saturating_sub(self.local_owned) + } +} + +/// Represents an output, with information about +/// its control -- exclusive by the adder or shared --, and +/// its ownership -- value fully owned by the adder or jointly +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum OutputOwned { + /// Belongs to local node -- controlled exclusively and fully belonging to local node + Single(TxOut), + /// Output with shared control, but fully belonging to local node + SharedControlFullyOwned(TxOut), + /// Output with shared control and joint ownership + Shared(SharedOwnedOutput), +} + +impl OutputOwned { + fn tx_out(&self) -> &TxOut { + match self { + OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => tx_out, + OutputOwned::Shared(output) => &output.tx_out, + } + } + + fn value(&self) -> u64 { + self.tx_out().value.to_sat() + } + + fn is_shared(&self) -> bool { 
+ match self { + OutputOwned::Single(_) => false, + OutputOwned::SharedControlFullyOwned(_) => true, + OutputOwned::Shared(_) => true, + } + } + + fn local_value(&self, local_role: AddingRole) -> u64 { + match self { + OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => { + match local_role { + AddingRole::Local => tx_out.value.to_sat(), + AddingRole::Remote => 0, + } + }, + OutputOwned::Shared(output) => output.local_owned, + } + } + + fn remote_value(&self, local_role: AddingRole) -> u64 { + match self { + OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => { + match local_role { + AddingRole::Local => 0, + AddingRole::Remote => tx_out.value.to_sat(), + } + }, + OutputOwned::Shared(output) => output.remote_owned(), + } + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct InteractiveTxOutput { + serial_id: SerialId, + added_by: AddingRole, + output: OutputOwned, +} + +impl InteractiveTxOutput { + fn tx_out(&self) -> &TxOut { + self.output.tx_out() + } + + fn value(&self) -> u64 { + self.tx_out().value.to_sat() + } + + fn local_value(&self) -> u64 { + self.output.local_value(self.added_by) + } + + fn remote_value(&self) -> u64 { + self.output.remote_value(self.added_by) + } + + fn script_pubkey(&self) -> &ScriptBuf { + &self.output.tx_out().script_pubkey + } +} + +impl InteractiveTxInput { + pub fn serial_id(&self) -> SerialId { + match self { + InteractiveTxInput::Local(input) => input.serial_id, + InteractiveTxInput::Remote(input) => input.serial_id, + } + } + + pub fn txin(&self) -> &TxIn { + match self { + InteractiveTxInput::Local(input) => &input.input, + InteractiveTxInput::Remote(input) => &input.input, + } + } + + pub fn prev_output(&self) -> &TxOut { + match self { + InteractiveTxInput::Local(input) => &input.prev_output, + InteractiveTxInput::Remote(input) => &input.prev_output, + } + } + + pub fn value(&self) -> u64 { + self.prev_output().value.to_sat() + } + + pub fn local_value(&self) -> u64 { + match self { + InteractiveTxInput::Local(input) => input.prev_output.value.to_sat(), + InteractiveTxInput::Remote(_input) => 0, + } + } + + pub fn remote_value(&self) -> u64 { + match self { + InteractiveTxInput::Local(_input) => 0, + InteractiveTxInput::Remote(input) => input.prev_output.value.to_sat(), + } + } +} + pub(crate) struct InteractiveTxConstructor { state_machine: StateMachine, channel_id: ChannelId, inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)>, - outputs_to_contribute: Vec<(SerialId, TxOut)>, + outputs_to_contribute: Vec<(SerialId, OutputOwned)>, } pub(crate) enum InteractiveTxMessageSend { @@@ -1096,104 -879,57 +1096,104 @@@ pub(crate) enum HandleTxCompleteValue impl InteractiveTxConstructor { /// Instantiates a new `InteractiveTxConstructor`. /// + /// `expected_remote_shared_funding_output`: In the case when the local node doesn't + /// add a shared output, but it expects a shared output to be added by the remote node, + /// it has to specify the script pubkey, used to determine the shared output, + /// and its (local) contribution from the shared output: + /// 0 when the whole value belongs to the remote node, or + /// positive if owned also by local. + /// Note: The local value cannot be larger that the actual shared output. + /// /// A tuple is returned containing the newly instantiate `InteractiveTxConstructor` and optionally /// an initial wrapped `Tx_` message which the holder needs to send to the counterparty. 
pub fn new( entropy_source: &ES, channel_id: ChannelId, feerate_sat_per_kw: u32, is_initiator: bool, funding_tx_locktime: AbsoluteLockTime, inputs_to_contribute: Vec<(TxIn, TransactionU16LenLimited)>, - outputs_to_contribute: Vec, - ) -> (Self, Option) + outputs_to_contribute: Vec, + expected_remote_shared_funding_output: Option<(ScriptBuf, u64)>, + ) -> Result<(Self, Option), AbortReason> where ES::Target: EntropySource, { - let state_machine = - StateMachine::new(feerate_sat_per_kw, is_initiator, funding_tx_locktime); - let mut inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)> = - inputs_to_contribute + // Sanity check: There can be at most one shared output, local-added or remote-added + let mut expected_shared_funding_output: Option<(ScriptBuf, u64)> = None; + for output in &outputs_to_contribute { + let new_output = match output { + OutputOwned::Single(_tx_out) => None, + OutputOwned::SharedControlFullyOwned(tx_out) => { + Some((tx_out.script_pubkey.clone(), tx_out.value.to_sat())) + }, + OutputOwned::Shared(output) => { + // Sanity check + if output.local_owned > output.tx_out.value.to_sat() { + return Err(AbortReason::InvalidLowFundingOutputValue); + } + Some((output.tx_out.script_pubkey.clone(), output.local_owned)) + }, + }; + if new_output.is_some() { + if expected_shared_funding_output.is_some() + || expected_remote_shared_funding_output.is_some() + { + // more than one local-added shared output or + // one local-added and one remote-expected shared output + return Err(AbortReason::DuplicateFundingOutput); + } + expected_shared_funding_output = new_output; + } + } + if let Some(expected_remote_shared_funding_output) = expected_remote_shared_funding_output { + expected_shared_funding_output = Some(expected_remote_shared_funding_output); + } + if let Some(expected_shared_funding_output) = expected_shared_funding_output { + let state_machine = StateMachine::new( + feerate_sat_per_kw, + is_initiator, + funding_tx_locktime, + expected_shared_funding_output, + ); + let mut inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)> = + inputs_to_contribute + .into_iter() + .map(|(input, tx)| { + let serial_id = generate_holder_serial_id(entropy_source, is_initiator); + (serial_id, input, tx) + }) + .collect(); + // We'll sort by the randomly generated serial IDs, effectively shuffling the order of the inputs + // as the user passed them to us to avoid leaking any potential categorization of transactions + // before we pass any of the inputs to the counterparty. + inputs_to_contribute.sort_unstable_by_key(|(serial_id, _, _)| *serial_id); + let mut outputs_to_contribute: Vec<_> = outputs_to_contribute .into_iter() - .map(|(input, tx)| { + .map(|output| { let serial_id = generate_holder_serial_id(entropy_source, is_initiator); - (serial_id, input, tx) + (serial_id, output) }) .collect(); - // We'll sort by the randomly generated serial IDs, effectively shuffling the order of the inputs - // as the user passed them to us to avoid leaking any potential categorization of transactions - // before we pass any of the inputs to the counterparty. - inputs_to_contribute.sort_unstable_by_key(|(serial_id, _, _)| *serial_id); - let mut outputs_to_contribute: Vec<(SerialId, TxOut)> = outputs_to_contribute - .into_iter() - .map(|output| { - let serial_id = generate_holder_serial_id(entropy_source, is_initiator); - (serial_id, output) - }) - .collect(); - // In the same manner and for the same rationale as the inputs above, we'll shuffle the outputs. 
- outputs_to_contribute.sort_unstable_by_key(|(serial_id, _)| *serial_id); - let mut constructor = - Self { state_machine, channel_id, inputs_to_contribute, outputs_to_contribute }; - let message_send = if is_initiator { - match constructor.maybe_send_message() { - Ok(msg_send) => Some(msg_send), - Err(_) => { - debug_assert!( - false, - "We should always be able to start our state machine successfully" - ); - None - }, - } + // In the same manner and for the same rationale as the inputs above, we'll shuffle the outputs. + outputs_to_contribute.sort_unstable_by_key(|(serial_id, _)| *serial_id); + let mut constructor = + Self { state_machine, channel_id, inputs_to_contribute, outputs_to_contribute }; + let message_send = if is_initiator { + match constructor.maybe_send_message() { + Ok(msg_send) => Some(msg_send), + Err(_) => { + debug_assert!( + false, + "We should always be able to start our state machine successfully" + ); + None + }, + } + } else { + None + }; + Ok((constructor, message_send)) } else { - None - }; - (constructor, message_send) + Err(AbortReason::MissingFundingOutput) + } } fn maybe_send_message(&mut self) -> Result { @@@ -1206,6 -942,7 +1206,7 @@@ prevtx, prevtx_out: input.previous_output.vout, sequence: input.sequence.to_consensus_u32(), + shared_input_txid: None, }; do_state_transition!(self, sent_tx_add_input, &msg)?; Ok(InteractiveTxMessageSend::TxAddInput(msg)) @@@ -1213,8 -950,8 +1214,8 @@@ let msg = msgs::TxAddOutput { channel_id: self.channel_id, serial_id, - sats: output.value.to_sat(), - script: output.script_pubkey, + sats: output.tx_out().value.to_sat(), + script: output.tx_out().script_pubkey.clone(), }; do_state_transition!(self, sent_tx_add_output, &msg)?; Ok(InteractiveTxMessageSend::TxAddOutput(msg)) @@@ -1300,7 -1037,6 +1301,7 @@@ mod tests use crate::sign::EntropySource; use crate::util::atomic_counter::AtomicCounter; use crate::util::ser::TransactionU16LenLimited; + use bitcoin::absolute::LockTime as AbsoluteLockTime; use bitcoin::amount::Amount; use bitcoin::blockdata::opcodes; use bitcoin::blockdata::script::Builder; @@@ -1309,13 -1045,13 +1310,13 @@@ use bitcoin::secp256k1::{Keypair, Secp256k1}; use bitcoin::transaction::Version; use bitcoin::{ - absolute::LockTime as AbsoluteLockTime, OutPoint, Sequence, Transaction, TxIn, TxOut, + OutPoint, PubkeyHash, ScriptBuf, Sequence, Transaction, TxIn, TxOut, WPubkeyHash, }; - use bitcoin::{PubkeyHash, ScriptBuf, WPubkeyHash, WScriptHash}; use core::ops::Deref; use super::{ - get_output_weight, P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND, + get_output_weight, AddingRole, OutputOwned, SharedOwnedOutput, + P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND, P2WSH_INPUT_WEIGHT_LOWER_BOUND, TX_COMMON_FIELDS_WEIGHT, }; @@@ -1364,14 -1100,10 +1365,14 @@@ struct TestSession { description: &'static str, inputs_a: Vec<(TxIn, TransactionU16LenLimited)>, - outputs_a: Vec, + outputs_a: Vec, inputs_b: Vec<(TxIn, TransactionU16LenLimited)>, - outputs_b: Vec, + outputs_b: Vec, expect_error: Option<(AbortReason, ErrorCulprit)>, + /// A node adds no shared output, but expects the peer to add one, with the specific script pubkey, and local contribution + a_expected_remote_shared_output: Option<(ScriptBuf, u64)>, + /// B node adds no shared output, but expects the peer to add one, with the specific script pubkey, and local contribution + b_expected_remote_shared_output: Option<(ScriptBuf, u64)>, } fn do_test_interactive_tx_constructor(session: TestSession) { @@@ -1395,103 -1127,24 +1396,103 @@@ 
let channel_id = ChannelId(entropy_source.get_secure_random_bytes()); let tx_locktime = AbsoluteLockTime::from_height(1337).unwrap(); - let (mut constructor_a, first_message_a) = InteractiveTxConstructor::new( + // funding output sanity check + let shared_outputs_by_a: Vec<_> = + session.outputs_a.iter().filter(|o| o.is_shared()).collect(); + if shared_outputs_by_a.len() > 1 { + println!("Test warning: Expected at most one shared output. NodeA"); + } + let shared_output_by_a = if shared_outputs_by_a.len() >= 1 { + Some(shared_outputs_by_a[0].value()) + } else { + None + }; + let shared_outputs_by_b: Vec<_> = + session.outputs_b.iter().filter(|o| o.is_shared()).collect(); + if shared_outputs_by_b.len() > 1 { + println!("Test warning: Expected at most one shared output. NodeB"); + } + let shared_output_by_b = if shared_outputs_by_b.len() >= 1 { + Some(shared_outputs_by_b[0].value()) + } else { + None + }; + if session.a_expected_remote_shared_output.is_some() + || session.b_expected_remote_shared_output.is_some() + { + let expected_by_a = if let Some(a_expected_remote_shared_output) = + &session.a_expected_remote_shared_output + { + a_expected_remote_shared_output.1 + } else { + if shared_outputs_by_a.len() >= 1 { + shared_outputs_by_a[0].local_value(AddingRole::Local) + } else { + 0 + } + }; + let expected_by_b = if let Some(b_expected_remote_shared_output) = + &session.b_expected_remote_shared_output + { + b_expected_remote_shared_output.1 + } else { + if shared_outputs_by_b.len() >= 1 { + shared_outputs_by_b[0].local_value(AddingRole::Local) + } else { + 0 + } + }; + + let expected_sum = expected_by_a + expected_by_b; + let actual_shared_output = + shared_output_by_a.unwrap_or(shared_output_by_b.unwrap_or(0)); + if expected_sum != actual_shared_output { + println!("Test warning: Sum of expected shared output values does not match actual shared output value, {} {} {} {} {} {}", expected_sum, actual_shared_output, expected_by_a, expected_by_b, shared_output_by_a.unwrap_or(0), shared_output_by_b.unwrap_or(0)); + } + } + + let (mut constructor_a, first_message_a) = match InteractiveTxConstructor::new( entropy_source, channel_id, TEST_FEERATE_SATS_PER_KW, true, tx_locktime, session.inputs_a, - session.outputs_a, - ); - let (mut constructor_b, first_message_b) = InteractiveTxConstructor::new( + session.outputs_a.iter().map(|o| o.clone()).collect(), + session.a_expected_remote_shared_output, + ) { + Ok(r) => r, + Err(abort_reason) => { + assert_eq!( + Some((abort_reason, ErrorCulprit::NodeA)), + session.expect_error, + "Test: {}", + session.description + ); + return; + }, + }; + let (mut constructor_b, first_message_b) = match InteractiveTxConstructor::new( entropy_source, channel_id, TEST_FEERATE_SATS_PER_KW, false, tx_locktime, session.inputs_b, - session.outputs_b, - ); + session.outputs_b.iter().map(|o| o.clone()).collect(), + session.b_expected_remote_shared_output, + ) { + Ok(r) => r, + Err(abort_reason) => { + assert_eq!( + Some((abort_reason, ErrorCulprit::NodeB)), + session.expect_error, + "Test: {}", + session.description + ); + return; + }, + }; let handle_message_send = |msg: InteractiveTxMessageSend, for_constructor: &mut InteractiveTxConstructor| { @@@ -1541,7 -1194,7 +1542,7 @@@ "Test: {}", session.description ); - assert!(message_send_b.is_none()); + assert!(message_send_b.is_none(), "Test: {}", session.description); return; }, } @@@ -1565,7 -1218,7 +1566,7 @@@ "Test: {}", session.description ); - assert!(message_send_a.is_none()); + assert!(message_send_a.is_none(), "Test: {}", 
session.description); return; }, } @@@ -1574,18 -1227,12 +1575,18 @@@ assert!(message_send_a.is_none()); assert!(message_send_b.is_none()); assert_eq!(final_tx_a.unwrap().into_unsigned_tx(), final_tx_b.unwrap().into_unsigned_tx()); - assert!(session.expect_error.is_none(), "Test: {}", session.description); + assert!( + session.expect_error.is_none(), + "Missing expected error {:?}, Test: {}", + session.expect_error, + session.description, + ); } #[derive(Debug, Clone, Copy)] enum TestOutput { P2WPKH(u64), + /// P2WSH, but with the specific script used for the funding output P2WSH(u64), P2TR(u64), // Non-witness type to test rejection. @@@ -1599,8 -1246,12 +1600,8 @@@ fn generate_txout(output: &TestOutput) -> TxOut { let secp_ctx = Secp256k1::new(); let (value, script_pubkey) = match output { - TestOutput::P2WPKH(value) => { - (*value, ScriptBuf::new_p2wpkh(&WPubkeyHash::from_slice(&[1; 20]).unwrap())) - }, - TestOutput::P2WSH(value) => { - (*value, ScriptBuf::new_p2wsh(&WScriptHash::from_slice(&[2; 32]).unwrap())) - }, + TestOutput::P2WPKH(value) => (*value, generate_p2wpkh_script_pubkey()), + TestOutput::P2WSH(value) => (*value, generate_funding_script_pubkey()), TestOutput::P2TR(value) => ( *value, ScriptBuf::new_p2tr( @@@ -1655,39 -1306,8 +1656,39 @@@ ScriptBuf::new_p2wpkh(&WPubkeyHash::from_slice(&[1; 20]).unwrap()) } - fn generate_outputs(outputs: &[TestOutput]) -> Vec { - outputs.iter().map(generate_txout).collect() + fn generate_funding_script_pubkey() -> ScriptBuf { + Builder::new().push_int(33).into_script().to_p2wsh() + } + + fn generate_output_nonfunding_one(output: &TestOutput) -> OutputOwned { + OutputOwned::Single(generate_txout(output)) + } + + fn generate_outputs(outputs: &[TestOutput]) -> Vec { + outputs.iter().map(|o| generate_output_nonfunding_one(o)).collect() + } + + /// Generate a single output that is the funding output + fn generate_output(output: &TestOutput) -> Vec { + vec![OutputOwned::SharedControlFullyOwned(generate_txout(output))] + } + + /// Generate a single P2WSH output that is the funding output + fn generate_funding_output(value: u64) -> Vec { + generate_output(&TestOutput::P2WSH(value)) + } + + /// Generate a single P2WSH output with shared contribution that is the funding output + fn generate_shared_funding_output_one(value: u64, local_value: u64) -> OutputOwned { + OutputOwned::Shared(SharedOwnedOutput { + tx_out: generate_txout(&TestOutput::P2WSH(value)), + local_owned: local_value, + }) + } + + /// Generate a single P2WSH output with shared contribution that is the funding output + fn generate_shared_funding_output(value: u64, local_value: u64) -> Vec { + vec![generate_shared_funding_output_one(value, local_value)] } fn generate_fixed_number_of_inputs(count: u16) -> Vec<(TxIn, TransactionU16LenLimited)> { @@@ -1729,7 -1349,7 +1730,7 @@@ inputs } - fn generate_fixed_number_of_outputs(count: u16) -> Vec { + fn generate_fixed_number_of_outputs(count: u16) -> Vec { // Set a constant value for each TxOut generate_outputs(&vec![TestOutput::P2WPKH(1_000_000); count as usize]) } @@@ -1738,11 -1358,8 +1739,11 @@@ Builder::new().push_opcode(opcodes::OP_TRUE).into_script().to_p2sh() } - fn generate_non_witness_output(value: u64) -> TxOut { - TxOut { value: Amount::from_sat(value), script_pubkey: generate_p2sh_script_pubkey() } + fn generate_non_witness_output(value: u64) -> OutputOwned { + OutputOwned::Single(TxOut { + value: Amount::from_sat(value), + script_pubkey: generate_p2sh_script_pubkey(), + }) } #[test] @@@ -1753,19 -1370,15 +1754,19 @@@ outputs_a: 
vec![], inputs_b: vec![], outputs_b: vec![], - expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + expect_error: Some((AbortReason::MissingFundingOutput, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: None, }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution, no initiator inputs", inputs_a: vec![], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution, no initiator outputs", @@@ -1773,19 -1386,15 +1774,19 @@@ outputs_a: vec![], inputs_b: vec![], outputs_b: vec![], - expect_error: None, + expect_error: Some((AbortReason::MissingFundingOutput, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: None, }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution, no fees", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); let p2wpkh_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2WPKH_INPUT_WEIGHT_LOWER_BOUND); let outputs_fee = fee_for_weight( @@@ -1794,106 -1403,92 +1795,106 @@@ ); let tx_common_fields_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, TX_COMMON_FIELDS_WEIGHT); + + let amount_adjusted_with_p2wpkh_fee = + 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee; do_test_interactive_tx_constructor(TestSession { description: "Single contribution, with P2WPKH input, insufficient fees", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */ - )]), + outputs_a: generate_output(&TestOutput::P2WPKH( + amount_adjusted_with_p2wpkh_fee + 1, /* makes fees insuffcient for initiator */ + )), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution with P2WPKH input, sufficient fees", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee, - )]), + outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2wpkh_fee)), inputs_b: vec![], outputs_b: vec![], expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); let p2wsh_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2WSH_INPUT_WEIGHT_LOWER_BOUND); + let amount_adjusted_with_p2wsh_fee = + 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee; 
do_test_interactive_tx_constructor(TestSession { description: "Single contribution, with P2WSH input, insufficient fees", inputs_a: generate_inputs(&[TestOutput::P2WSH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */ - )]), + outputs_a: generate_output(&TestOutput::P2WPKH( + amount_adjusted_with_p2wsh_fee + 1, /* makes fees insuffcient for initiator */ + )), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution with P2WSH input, sufficient fees", inputs_a: generate_inputs(&[TestOutput::P2WSH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee, - )]), + outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2wsh_fee)), inputs_b: vec![], outputs_b: vec![], expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); let p2tr_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2TR_INPUT_WEIGHT_LOWER_BOUND); + let amount_adjusted_with_p2tr_fee = + 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee; do_test_interactive_tx_constructor(TestSession { description: "Single contribution, with P2TR input, insufficient fees", inputs_a: generate_inputs(&[TestOutput::P2TR(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */ - )]), + outputs_a: generate_output(&TestOutput::P2WPKH( + amount_adjusted_with_p2tr_fee + 1, /* makes fees insuffcient for initiator */ + )), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution with P2TR input, sufficient fees", inputs_a: generate_inputs(&[TestOutput::P2TR(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee, - )]), + outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2tr_fee)), inputs_b: vec![], outputs_b: vec![], expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator contributes sufficient fees, but non-initiator does not", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), outputs_a: vec![], inputs_b: generate_inputs(&[TestOutput::P2WPKH(100_000)]), - outputs_b: generate_outputs(&[TestOutput::P2WPKH(100_000)]), + outputs_b: generate_output(&TestOutput::P2WPKH(100_000)), expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeB)), + a_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), + b_expected_remote_shared_output: None, }); do_test_interactive_tx_constructor(TestSession { description: "Multi-input-output contributions from both sides", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000); 2]), - outputs_a: generate_outputs(&[ - 
TestOutput::P2WPKH(1_000_000), - TestOutput::P2WPKH(200_000), - ]), + outputs_a: vec![ + generate_shared_funding_output_one(1_000_000, 200_000), + generate_output_nonfunding_one(&TestOutput::P2WPKH(200_000)), + ], inputs_b: generate_inputs(&[ TestOutput::P2WPKH(1_000_000), TestOutput::P2WPKH(500_000), ]), - outputs_b: generate_outputs(&[ - TestOutput::P2WPKH(1_000_000), - TestOutput::P2WPKH(400_000), - ]), + outputs_b: vec![generate_output_nonfunding_one(&TestOutput::P2WPKH(400_000))], expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 800_000)), }); do_test_interactive_tx_constructor(TestSession { @@@ -1903,8 -1498,6 +1904,8 @@@ inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); let tx = @@@ -1916,12 -1509,10 +1917,12 @@@ do_test_interactive_tx_constructor(TestSession { description: "Invalid input sequence from initiator", inputs_a: vec![(invalid_sequence_input, tx.clone())], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::IncorrectInputSequenceValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); let duplicate_input = TxIn { previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 }, @@@ -1931,14 -1522,11 +1932,14 @@@ do_test_interactive_tx_constructor(TestSession { description: "Duplicate prevout from initiator", inputs_a: vec![(duplicate_input.clone(), tx.clone()), (duplicate_input, tx.clone())], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeB)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); + // Non-initiator uses same prevout as initiator. 
let duplicate_input = TxIn { previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 }, sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, @@@ -1947,27 -1535,10 +1948,27 @@@ do_test_interactive_tx_constructor(TestSession { description: "Non-initiator uses same prevout as initiator", inputs_a: vec![(duplicate_input.clone(), tx.clone())], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_shared_funding_output(1_000_000, 905_000), inputs_b: vec![(duplicate_input.clone(), tx.clone())], outputs_b: vec![], expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 95_000)), + }); + let duplicate_input = TxIn { + previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 }, + sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, + ..Default::default() + }; + do_test_interactive_tx_constructor(TestSession { + description: "Non-initiator uses same prevout as initiator", + inputs_a: vec![(duplicate_input.clone(), tx.clone())], + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), + inputs_b: vec![(duplicate_input.clone(), tx.clone())], + outputs_b: vec![], + expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator sends too many TxAddInputs", @@@ -1976,8 -1547,6 +1977,8 @@@ inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::ReceivedTooManyTxAddInputs, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor_with_entropy_source( TestSession { @@@ -1988,8 -1557,6 +1989,8 @@@ inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }, &DuplicateEntropySource, ); @@@ -2000,30 -1567,24 +2001,30 @@@ inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::ReceivedTooManyTxAddOutputs, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator sends an output below dust value", inputs_a: vec![], - outputs_a: generate_outputs(&[TestOutput::P2WSH( + outputs_a: generate_funding_output( generate_p2wsh_script_pubkey().dust_value().to_sat() - 1, - )]), + ), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::BelowDustLimit, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator sends an output above maximum sats allowed", inputs_a: vec![], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1)]), + outputs_a: generate_output(&TestOutput::P2WPKH(TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::ExceededMaximumSatsAllowed, ErrorCulprit::NodeA)), + 
a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator sends an output without a witness program", @@@ -2032,8 -1593,6 +2033,8 @@@ inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InvalidOutputScript, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor_with_entropy_source( TestSession { @@@ -2044,8 -1603,6 +2045,8 @@@ inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }, &DuplicateEntropySource, ); @@@ -2053,12 -1610,10 +2054,12 @@@ do_test_interactive_tx_constructor(TestSession { description: "Peer contributed more output value than inputs", inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { @@@ -2071,8 -1626,6 +2072,8 @@@ AbortReason::ExceededNumberOfInputsOrOutputs, ErrorCulprit::Indeterminate, )), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Peer contributed more than allowed number of outputs", @@@ -2084,121 -1637,6 +2085,121 @@@ AbortReason::ExceededNumberOfInputsOrOutputs, ErrorCulprit::Indeterminate, )), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + }); + + // Adding multiple outputs to the funding output pubkey is an error + do_test_interactive_tx_constructor(TestSession { + description: "Adding two outputs to the funding output pubkey", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_funding_output(100_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(1_001_000)]), + outputs_b: generate_funding_output(100_000), + expect_error: Some((AbortReason::DuplicateFundingOutput, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: None, + }); + + // We add the funding output, but we contribute a little + do_test_interactive_tx_constructor(TestSession { + description: "Funding output by us, small contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(12_000)]), + outputs_a: generate_shared_funding_output(1_000_000, 10_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(992_000)]), + outputs_b: vec![], + expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 990_000)), + }); + + // They add the funding output, and we contribute a little + do_test_interactive_tx_constructor(TestSession { + description: "Funding output by them, small contribution", + 
inputs_a: generate_inputs(&[TestOutput::P2WPKH(12_000)]), + outputs_a: vec![], + inputs_b: generate_inputs(&[TestOutput::P2WPKH(992_000)]), + outputs_b: generate_shared_funding_output(1_000_000, 990_000), + expect_error: None, + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 10_000)), + b_expected_remote_shared_output: None, + }); + + // We add the funding output, and we contribute most + do_test_interactive_tx_constructor(TestSession { + description: "Funding output by us, large contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(992_000)]), + outputs_a: generate_shared_funding_output(1_000_000, 990_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(12_000)]), + outputs_b: vec![], + expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 10_000)), + }); + + // They add the funding output, but we contribute most + do_test_interactive_tx_constructor(TestSession { + description: "Funding output by them, large contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(992_000)]), + outputs_a: vec![], + inputs_b: generate_inputs(&[TestOutput::P2WPKH(12_000)]), + outputs_b: generate_shared_funding_output(1_000_000, 10_000), + expect_error: None, + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 990_000)), + b_expected_remote_shared_output: None, + }); + + // During a splice-out, with peer providing more output value than input value + // but still pays enough fees due to their to_remote_value_satoshis portion in + // the shared input. + do_test_interactive_tx_constructor(TestSession { + description: "Splice out with sufficient initiator balance", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(50_000)]), + outputs_a: generate_funding_output(120_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(50_000)]), + outputs_b: vec![], + expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + }); + + // During a splice-out, with peer providing more output value than input value + // and the to_remote_value_satoshis portion in + // the shared input cannot cover fees + do_test_interactive_tx_constructor(TestSession { + description: "Splice out with insufficient initiator balance", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]), + outputs_a: generate_funding_output(120_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]), + outputs_b: vec![], + expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + }); + + // The actual funding output value is lower than the intended local contribution by the same node + do_test_interactive_tx_constructor(TestSession { + description: "Splice in, invalid intended local contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]), + outputs_a: generate_shared_funding_output(100_000, 120_000), // local value is higher than the output value + inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]), + outputs_b: vec![], + expect_error: Some((AbortReason::InvalidLowFundingOutputValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 20_000)), + }); + + // The actual funding 
output value is lower than the intended local contribution of the other node + do_test_interactive_tx_constructor(TestSession { + description: "Splice in, invalid intended local contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]), + outputs_a: vec![], + inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]), + outputs_b: generate_funding_output(100_000), + // The error is caused by NodeA, it occurs when nodeA prepares the message to be sent to NodeB, that's why here it shows up as NodeB + expect_error: Some((AbortReason::InvalidLowFundingOutputValue, ErrorCulprit::NodeB)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 120_000)), // this is higher than the actual output value + b_expected_remote_shared_output: None, }); } diff --combined lightning/src/ln/peer_handler.rs index 9a026d70,4911fb24..1b75755f --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@@ -28,7 -28,6 +28,7 @@@ use crate::util::ser::{VecWriter, Write use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE}; use crate::ln::wire; use crate::ln::wire::{Encode, Type}; +use crate::onion_message::async_payments::{AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc}; use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage, Responder, ResponseInstruction}; use crate::onion_message::offers::{OffersMessage, OffersMessageHandler}; use crate::onion_message::packet::OnionMessageContents; @@@ -149,14 -148,6 +149,14 @@@ impl OffersMessageHandler for IgnoringM ResponseInstruction::NoResponse } } +impl AsyncPaymentsMessageHandler for IgnoringMessageHandler { + fn held_htlc_available( + &self, _message: HeldHtlcAvailable, _responder: Option, + ) -> ResponseInstruction { + ResponseInstruction::NoResponse + } + fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {} +} impl CustomOnionMessageHandler for IgnoringMessageHandler { type CustomMessage = Infallible; fn handle_custom_message(&self, _message: Self::CustomMessage, _responder: Option) -> ResponseInstruction { @@@ -274,7 -265,7 +274,7 @@@ impl ChannelMessageHandler for Erroring ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id); } #[cfg(splicing)] - fn handle_splice(&self, their_node_id: &PublicKey, msg: &msgs::Splice) { + fn handle_splice_init(&self, their_node_id: &PublicKey, msg: &msgs::SpliceInit) { ErroringMessageHandler::push_error(&self, their_node_id, msg.channel_id); } #[cfg(splicing)] @@@ -1815,8 -1806,8 +1815,8 @@@ impl { - self.message_handler.chan_handler.handle_splice(&their_node_id, &msg); + wire::Message::SpliceInit(msg) => { + self.message_handler.chan_handler.handle_splice_init(&their_node_id, &msg); } #[cfg(splicing)] wire::Message::SpliceAck(msg) => { @@@ -2154,9 -2145,9 +2154,9 @@@ &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); } - MessageSendEvent::SendSplice { ref node_id, ref msg} => { + MessageSendEvent::SendSpliceInit { ref node_id, ref msg} => { let logger = WithContext::from(&self.logger, Some(*node_id), Some(msg.channel_id), None); - log_debug!(logger, "Handling SendSplice event in peer_handler for node {} for channel {}", + log_debug!(logger, "Handling SendSpliceInit event in peer_handler for node {} for channel {}", log_pubkey!(node_id), &msg.channel_id); self.enqueue_message(&mut *get_peer_for_forwarding!(node_id), msg); diff --combined lightning/src/util/test_utils.rs index 
4d17f8fd,2fea6109..4b2c3c2e --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@@ -79,8 -79,6 +79,8 @@@ use std::time::{SystemTime, UNIX_EPOCH} use bitcoin::psbt::Psbt; use bitcoin::Sequence; +use super::test_channel_signer::SignerOp; + pub fn pubkey(byte: u8) -> PublicKey { let secp_ctx = Secp256k1::new(); PublicKey::from_secret_key(&secp_ctx, &privkey(byte)) @@@ -547,16 -545,12 +547,16 @@@ pub struct TestPersister /// /// [`ChannelMonitor`]: channelmonitor::ChannelMonitor pub offchain_monitor_updates: Mutex>>, + /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the + /// monitor's funding outpoint here. + pub chain_sync_monitor_persistences: Mutex> } impl TestPersister { pub fn new() -> Self { Self { update_rets: Mutex::new(VecDeque::new()), offchain_monitor_updates: Mutex::new(new_hash_map()), + chain_sync_monitor_persistences: Mutex::new(VecDeque::new()) } } @@@ -579,18 -573,15 +579,18 @@@ impl>>>, expectations: Mutex>>, pub unavailable_signers: Mutex>, + pub unavailable_signers_ops: Mutex>>, } impl EntropySource for TestKeysInterface { @@@ -1283,11 -1273,9 +1283,11 @@@ impl SignerProvider for TestKeysInterfa fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner { let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id); let state = self.make_enforcement_state_cell(keys.commitment_seed); - let signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check); - if self.unavailable_signers.lock().unwrap().contains(&channel_keys_id) { - signer.set_available(false); + let mut signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check); + if let Some(ops) = self.unavailable_signers_ops.lock().unwrap().get(&channel_keys_id) { + for &op in ops { + signer.disable_op(op); + } } signer } @@@ -1328,7 -1316,6 +1328,7 @@@ impl TestKeysInterface enforcement_states: Mutex::new(new_hash_map()), expectations: Mutex::new(None), unavailable_signers: Mutex::new(new_hash_set()), + unavailable_signers_ops: Mutex::new(new_hash_map()), } }
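A note on the ownership accounting introduced above: `OutputOwned` (with its `Single`, `SharedControlFullyOwned`, and `Shared(SharedOwnedOutput)` variants) determines how much of each negotiated output's value is credited to the local node versus the remote node, depending on which side added it. Below is a minimal, dependency-free sketch of that accounting. The variant shapes and the 1_000_000 / 200_000 / 400_000 figures mirror the "Multi-input-output contributions from both sides" test above, but the simplified names (`Owned`, `Role`) and the use of plain u64 sat values instead of bitcoin::TxOut are illustrative only and not part of the LDK API.

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    enum Role { Local, Remote }

    #[derive(Clone, Copy, Debug)]
    enum Owned {
        /// Exclusively controlled and fully owned by the adder.
        Single { sats: u64 },
        /// Shared control (e.g. the funding script), but fully owned by the adder.
        SharedControlFullyOwned { sats: u64 },
        /// Shared control and split ownership; invariant: local_owned <= sats.
        Shared { sats: u64, local_owned: u64 },
    }

    impl Owned {
        /// Value credited to the local node, given which side added the output.
        fn local_value(&self, added_by: Role) -> u64 {
            match *self {
                Owned::Single { sats } | Owned::SharedControlFullyOwned { sats } => match added_by {
                    Role::Local => sats,
                    Role::Remote => 0,
                },
                Owned::Shared { local_owned, .. } => local_owned,
            }
        }

        /// Value credited to the remote node; always sats minus the local share.
        fn remote_value(&self, added_by: Role) -> u64 {
            let sats = match *self {
                Owned::Single { sats }
                | Owned::SharedControlFullyOwned { sats }
                | Owned::Shared { sats, .. } => sats,
            };
            sats.saturating_sub(self.local_value(added_by))
        }
    }

    fn main() {
        // A 1_000_000 sat shared funding output to which we contribute 200_000 sats,
        // as in the "Multi-input-output contributions from both sides" test.
        let funding = Owned::Shared { sats: 1_000_000, local_owned: 200_000 };
        assert_eq!(funding.local_value(Role::Local), 200_000);
        assert_eq!(funding.remote_value(Role::Local), 800_000);

        // A plain change output we added: fully ours, nothing credited to the peer.
        let change = Owned::Single { sats: 200_000 };
        assert_eq!(change.local_value(Role::Local), 200_000);
        assert_eq!(change.remote_value(Role::Local), 0);

        // An output the peer added and fully owns, seen from our side.
        let their_change = Owned::Single { sats: 400_000 };
        assert_eq!(their_change.local_value(Role::Remote), 0);
        assert_eq!(their_change.remote_value(Role::Remote), 400_000);
    }

The same invariant is what the constructor check above enforces: for an `OutputOwned::Shared` output, `local_owned` may not exceed the output value (otherwise negotiation aborts with `AbortReason::InvalidLowFundingOutputValue`), so the locally and remotely owned portions of a shared output always sum to its total value.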