X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannel.rs;h=3a324e6f4d8856197e8351ef675809aa48a71825;hb=adc1b55a6fa064852d838ceb91b11e6b228d169e;hp=62a2b7b51e52b723231fec7bec63f088c4c793a2;hpb=41a6c674f9c06c89b0890364ac75b57b648e538e;p=rust-lightning

diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 62a2b7b5..3a324e6f 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -25,19 +25,19 @@ use bitcoin::secp256k1;
 use crate::ln::{PaymentPreimage, PaymentHash};
 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
 use crate::ln::msgs;
-use crate::ln::msgs::{DecodeError, OptionalField, DataLossProtect};
+use crate::ln::msgs::DecodeError;
 use crate::ln::script::{self, ShutdownScript};
-use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
+use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT};
 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
 use crate::ln::chan_utils;
 use crate::ln::onion_utils::HTLCFailReason;
 use crate::chain::BestBlock;
 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
-use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS};
+use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
 use crate::chain::transaction::{OutPoint, TransactionData};
-use crate::chain::keysinterface::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::sign::{WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
+use crate::events::ClosureReason;
 use crate::routing::gossip::NodeId;
-use crate::util::events::ClosureReason;
 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
 use crate::util::logger::Logger;
 use crate::util::errors::APIError;
@@ -192,6 +192,7 @@ enum OutboundHTLCState {
 #[derive(Clone)]
 enum OutboundHTLCOutcome {
+	/// LDK version 0.0.105+ will always fill in the preimage here.
 	Success(Option<PaymentPreimage>),
 	Failure(HTLCFailReason),
 }
@@ -311,9 +312,9 @@ pub(super) enum ChannelUpdateStatus {
 	/// We've announced the channel as enabled and are connected to our peer.
 	Enabled,
 	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
-	DisabledStaged,
+	DisabledStaged(u8),
 	/// Our channel is live again, but we haven't announced the channel as enabled yet.
-	EnabledStaged,
+	EnabledStaged(u8),
 	/// We've announced the channel as disabled.
 	Disabled,
 }
@@ -393,35 +394,21 @@ enum UpdateFulfillFetch {
 }

 /// The return type of get_update_fulfill_htlc_and_commit.
-pub enum UpdateFulfillCommitFetch {
+pub enum UpdateFulfillCommitFetch<'a> {
 	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
 	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
 	/// previously placed in the holding cell (and has since been removed).
 	NewClaim {
 		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
-		monitor_update: ChannelMonitorUpdate,
+		monitor_update: &'a ChannelMonitorUpdate,
 		/// The value of the HTLC which was claimed, in msat.
 		htlc_value_msat: u64,
-		/// The update_fulfill message and commitment_signed message (if the claim was not placed
-		/// in the holding cell).
-		msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)>,
 	},
 	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
 	/// or has been forgotten (presumably previously claimed).
 	DuplicateClaim {},
 }

-/// The return value of `revoke_and_ack` on success, primarily updates to other channels or HTLC
-/// state.
-pub(super) struct RAAUpdates {
-	pub commitment_update: Option<msgs::CommitmentUpdate>,
-	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
-	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
-	pub finalized_claimed_htlcs: Vec<HTLCSource>,
-	pub monitor_update: ChannelMonitorUpdate,
-	pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
-}
-
 /// The return value of `monitor_updating_restored`
 pub(super) struct MonitorRestoreUpdates {
 	pub raa: Option<msgs::RevokeAndACK>,
@@ -512,6 +499,7 @@ pub(super) struct Channel<Signer: ChannelSigner> {
 	user_id: u128,

 	channel_id: [u8; 32],
+	temporary_channel_id: Option<[u8; 32]>, // Will be `None` for channels created prior to 0.0.115.
 	channel_state: u32,

 	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
@@ -558,6 +546,11 @@ pub(super) struct Channel<Signer: ChannelSigner> {
 	monitor_pending_channel_ready: bool,
 	monitor_pending_revoke_and_ack: bool,
 	monitor_pending_commitment_signed: bool,
+
+	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
+	// responsible for some of the HTLCs here or not - we don't know whether the update in question
+	// completed or not. We currently ignore these fields entirely when force-closing a channel,
+	// but need to handle this somehow or we run the risk of losing HTLCs!
 	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
 	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
 	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
@@ -660,7 +653,7 @@ pub(super) struct Channel<Signer: ChannelSigner> {
 	pub counterparty_max_accepted_htlcs: u16,
 	#[cfg(not(test))]
 	counterparty_max_accepted_htlcs: u16,
-	//implied by OUR_MAX_HTLCS: max_accepted_htlcs: u16,
+	holder_max_accepted_htlcs: u16,
 	minimum_depth: Option<u32>,

 	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
@@ -737,12 +730,21 @@ pub(super) struct Channel<Signer: ChannelSigner> {
 	// blinded paths instead of simple scid+node_id aliases.
 	outbound_scid_alias: u64,

+	// We track whether we already emitted a `ChannelPending` event.
+	channel_pending_event_emitted: bool,
+
 	// We track whether we already emitted a `ChannelReady` event.
 	channel_ready_event_emitted: bool,

 	/// The unique identifier used to re-derive the private key material for the channel through
 	/// [`SignerProvider::derive_channel_signer`].
 	channel_keys_id: [u8; 32],
+
+	/// When we generate [`ChannelMonitorUpdate`]s to persist, they may not be persisted immediately.
+	/// If we then persist the [`channelmanager::ChannelManager`] and crash before the persistence
+	/// completes we still need to be able to complete the persistence. Thus, we have to keep a
+	/// copy of the [`ChannelMonitorUpdate`] here until it is complete.
+	pending_monitor_updates: Vec<ChannelMonitorUpdate>,
 }

 #[cfg(any(test, fuzzing))]
@@ -754,7 +756,7 @@ struct CommitmentTxInfoCached {
 	feerate: u32,
 }

-pub const OUR_MAX_HTLCS: u16 = 50; //TODO
+pub const DEFAULT_MAX_HTLCS: u16 = 50;

 pub(crate) fn commitment_tx_base_weight(opt_anchors: bool) -> u64 {
 	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
@@ -984,7 +986,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

 		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
-			Some(signer_provider.get_shutdown_scriptpubkey())
+			match signer_provider.get_shutdown_scriptpubkey() {
+				Ok(scriptpubkey) => Some(scriptpubkey),
+				Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
+			}
 		} else { None };

 		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
@@ -993,6 +998,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			}
 		}

+		let destination_script = match signer_provider.get_destination_script() {
+			Ok(script) => script,
+			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
+		};
+
+		let temporary_channel_id = entropy_source.get_secure_random_bytes();
+
 		Ok(Channel {
 			user_id,
@@ -1006,7 +1018,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 			inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),

-			channel_id: entropy_source.get_secure_random_bytes(),
+			channel_id: temporary_channel_id,
+			temporary_channel_id: Some(temporary_channel_id),
 			channel_state: ChannelState::OurInitSent as u32,
 			announcement_sigs_state: AnnouncementSigsState::NotSent,
 			secp_ctx,
@@ -1016,7 +1029,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			holder_signer,
 			shutdown_scriptpubkey,
-			destination_script: signer_provider.get_destination_script(),
+			destination_script,

 			cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 			cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
@@ -1067,6 +1080,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			counterparty_htlc_minimum_msat: 0,
 			holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
 			counterparty_max_accepted_htlcs: 0,
+			holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
 			minimum_depth: None, // Filled in in accept_channel
 			counterparty_forwarding_info: None,
@@ -1105,6 +1119,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			latest_inbound_scid_alias: None,
 			outbound_scid_alias,

+			channel_pending_event_emitted: false,
 			channel_ready_event_emitted: false,

 			#[cfg(any(test, fuzzing))]
@@ -1112,6 +1127,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 			channel_type,
 			channel_keys_id,
+
+			pending_monitor_updates: Vec::new(),
 		})
 	}
@@ -1305,7 +1322,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
 			match &msg.shutdown_scriptpubkey {
-				&OptionalField::Present(ref script) => {
+				&Some(ref script) => {
 					// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
 					if script.len() == 0 {
 						None
@@ -1317,14 +1334,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 					}
 				},
 				// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
-				&OptionalField::Absent => {
+				&None => {
 					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
 				}
 			}
 		} else { None };

 		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
-			Some(signer_provider.get_shutdown_scriptpubkey())
+			match signer_provider.get_shutdown_scriptpubkey() {
+				Ok(scriptpubkey) => Some(scriptpubkey),
+				Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
+			}
 		} else { None };

 		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
@@ -1333,6 +1353,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			}
 		}

+		let destination_script = match signer_provider.get_destination_script() {
+			Ok(script) => script,
+			Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
+		};
+
 		let mut secp_ctx = Secp256k1::new();
 		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

@@ -1350,6 +1375,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			inbound_handshake_limits_override: None,

 			channel_id: msg.temporary_channel_id,
+			temporary_channel_id: Some(msg.temporary_channel_id),
 			channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
 			announcement_sigs_state: AnnouncementSigsState::NotSent,
 			secp_ctx,
@@ -1358,7 +1384,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			holder_signer,
 			shutdown_scriptpubkey,
-			destination_script: signer_provider.get_destination_script(),
+			destination_script,

 			cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
 			cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
@@ -1410,6 +1436,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
 			holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
 			counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
+			holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
 			minimum_depth: Some(cmp::max(config.channel_handshake_config.minimum_depth, 1)),
 			counterparty_forwarding_info: None,
@@ -1451,6 +1478,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			latest_inbound_scid_alias: None,
 			outbound_scid_alias,

+			channel_pending_event_emitted: false,
 			channel_ready_event_emitted: false,

 			#[cfg(any(test, fuzzing))]
@@ -1458,6 +1486,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 			channel_type,
 			channel_keys_id,
+
+			pending_monitor_updates: Vec::new(),
 		};

 		Ok(chan)
@@ -1964,22 +1994,30 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		}
 	}

-	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> Result<UpdateFulfillCommitFetch, (ChannelError, ChannelMonitorUpdate)> where L::Target: Logger {
+	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
 		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
-			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(update_fulfill_htlc) } => {
-				let (commitment, mut additional_update) = match self.send_commitment_no_status_check(logger) {
-					Err(e) => return Err((e, monitor_update)),
-					Ok(res) => res
-				};
-				// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg: Some(_) } => {
+				let mut additional_update = self.build_commitment_no_status_check(logger);
+				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
 				// strictly increasing by one, so decrement it here.
 				self.latest_monitor_update_id = monitor_update.update_id;
 				monitor_update.updates.append(&mut additional_update.updates);
-				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: Some((update_fulfill_htlc, commitment)) })
+				self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+				self.pending_monitor_updates.push(monitor_update);
+				UpdateFulfillCommitFetch::NewClaim {
+					monitor_update: self.pending_monitor_updates.last().unwrap(),
+					htlc_value_msat,
+				}
 			},
-			UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } =>
-				Ok(UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, msgs: None }),
-			UpdateFulfillFetch::DuplicateClaim {} => Ok(UpdateFulfillCommitFetch::DuplicateClaim {}),
+			UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None } => {
+				self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+				self.pending_monitor_updates.push(monitor_update);
+				UpdateFulfillCommitFetch::NewClaim {
+					monitor_update: self.pending_monitor_updates.last().unwrap(),
+					htlc_value_msat,
+				}
+			}
+			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
 		}
 	}

@@ -2169,7 +2207,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
 			match &msg.shutdown_scriptpubkey {
-				&OptionalField::Present(ref script) => {
+				&Some(ref script) => {
 					// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
 					if script.len() == 0 {
 						None
@@ -2181,7 +2219,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 					}
 				},
 				// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
-				&OptionalField::Absent => {
+				&None => {
 					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
 				}
 			}
@@ -2259,9 +2297,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 	pub fn funding_created<SP: Deref, L: Deref>(
 		&mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
-	) -> Result<(msgs::FundingSigned, ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Option<msgs::ChannelReady>), ChannelError>
+	) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError>
 	where
-		SP::Target: SignerProvider,
+		SP::Target: SignerProvider<Signer = Signer>,
 		L::Target: Logger
 	{
 		if self.is_outbound() {
@@ -2337,19 +2375,24 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 		log_info!(logger, "Generated funding_signed for peer for channel {}", log_bytes!(self.channel_id()));

+		let need_channel_ready = self.check_get_channel_ready(0).is_some();
+		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+
 		Ok((msgs::FundingSigned {
 			channel_id: self.channel_id,
-			signature
-		}, channel_monitor, self.check_get_channel_ready(0)))
+			signature,
+			#[cfg(taproot)]
+			partial_signature_with_nonce: None,
+		}, channel_monitor))
 	}
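// An illustrative sketch (not part of the upstream diff) of the update-merging
// pattern used in `get_update_fulfill_htlc_and_commit` above: when a second
// `ChannelMonitorUpdate` is built immediately after a first, its steps are folded
// into the first one and the shared counter is rewound so that update IDs remain
// strictly increasing by exactly one. `Update` is a hypothetical stand-in type.
struct Update { update_id: u64, updates: Vec<&'static str> }

fn merge(mut first: Update, mut second: Update, latest_update_id: &mut u64) -> Update {
    // Building `second` bumped the shared counter; rewind it so the merged
    // update consumes only a single ID.
    *latest_update_id = first.update_id;
    first.updates.append(&mut second.updates);
    first
}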
 	/// Handles a funding_signed message from the remote end.
 	/// If this call is successful, broadcast the funding transaction (and not before!)
 	pub fn funding_signed<SP: Deref, L: Deref>(
 		&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
-	) -> Result<(ChannelMonitor<<SP::Target as SignerProvider>::Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError>
+	) -> Result<ChannelMonitor<Signer>, ChannelError>
 	where
-		SP::Target: SignerProvider,
+		SP::Target: SignerProvider<Signer = Signer>,
 		L::Target: Logger
 	{
 		if !self.is_outbound() {
@@ -2422,7 +2465,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 		log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));

-		Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
+		let need_channel_ready = self.check_get_channel_ready(0).is_some();
+		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+		Ok(channel_monitor)
 	}

 	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
@@ -2469,6 +2514,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			// If they haven't ever sent an updated point, the point they send should match
 			// the current one.
 			self.counterparty_cur_commitment_point
+		} else if self.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
+			// If we've advanced the commitment number once, the second commitment point is
+			// at `counterparty_prev_commitment_point`, which is not yet revoked.
+			debug_assert!(self.counterparty_prev_commitment_point.is_some());
+			self.counterparty_prev_commitment_point
 		} else {
 			// If they have sent updated points, channel_ready is always supposed to match
 			// their "first" point, which we re-derive here.
@@ -2842,8 +2892,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		let inbound_stats = self.get_inbound_pending_htlc_stats(None);
 		let outbound_stats = self.get_outbound_pending_htlc_stats(None);

-		if inbound_stats.pending_htlcs + 1 > OUR_MAX_HTLCS as u32 {
-			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS)));
+		if inbound_stats.pending_htlcs + 1 > self.holder_max_accepted_htlcs as u32 {
+			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.holder_max_accepted_htlcs)));
 		}
 		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.holder_max_htlc_value_in_flight_msat {
 			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.holder_max_htlc_value_in_flight_msat)));
@@ -3034,17 +3084,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		Ok(())
 	}

-	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>, ChannelMonitorUpdate), (Option<ChannelMonitorUpdate>, ChannelError)>
+	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<&ChannelMonitorUpdate, ChannelError>
 		where L::Target: Logger
 	{
 		if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
-			return Err((None, ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned())));
+			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
 		}
 		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
-			return Err((None, ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned())));
+			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
 		}
 		if self.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.last_sent_closing_fee.is_some() {
-			return Err((None, ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned())));
+			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
 		}

 		let funding_script = self.get_funding_redeemscript();
@@ -3062,7 +3112,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
 				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), log_bytes!(self.channel_id()));
 			if let Err(_) = self.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
-				return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
+				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
 			}
 			bitcoin_tx.txid
 		};
@@ -3077,7 +3127,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			debug_assert!(!self.is_outbound());
 			let counterparty_reserve_we_require_msat = self.holder_selected_channel_reserve_satoshis * 1000;
 			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
-				return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())));
+				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
 			}
 		}
 		#[cfg(any(test, fuzzing))]
@@ -3099,12 +3149,27 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		}

 		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
-			return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs))));
+			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
 		}

-		// TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the update
+		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
+		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
+		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
+		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
+		// backwards compatibility, we never use it in production. To provide test coverage, here,
+		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
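// A self-contained sketch (not part of the upstream diff) of the `RandomState`
// trick the comment above refers to and the next hunk implements: `std` exposes
// no direct RNG, but `RandomState` seeds its hashers randomly per process, so
// finishing an empty hash yields a usable random u64.
use core::hash::{BuildHasher, Hasher};

fn coin_flip() -> bool {
    let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
    rand_val % 2 == 0
}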
+		#[allow(unused_assignments, unused_mut)]
+		let mut separate_nondust_htlc_sources = false;
+		#[cfg(all(feature = "std", any(test, fuzzing)))] {
+			use core::hash::{BuildHasher, Hasher};
+			// Get a random value using the only std API to do so - the DefaultHasher
+			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
+			separate_nondust_htlc_sources = rand_val % 2 == 0;
+		}
+
+		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
 		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
-		for (idx, (htlc, source)) in htlcs_cloned.drain(..).enumerate() {
+		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
 			if let Some(_) = htlc.transaction_output_index {
 				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
 					self.get_counterparty_selected_contest_delay().unwrap(), &htlc, self.opt_anchors(),
@@ -3117,12 +3182,20 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
 					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), log_bytes!(self.channel_id()));
 				if let Err(_) = self.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
-					return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
+					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
+				}
+				if !separate_nondust_htlc_sources {
+					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
 				}
-				htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
 			} else {
-				htlcs_and_sigs.push((htlc, None, source));
+				htlcs_and_sigs.push((htlc, None, source_opt.take()));
+			}
+			if separate_nondust_htlc_sources {
+				if let Some(source) = source_opt.take() {
+					nondust_htlc_sources.push(source);
+				}
 			}
+			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
 		}

 		let holder_commitment_tx = HolderCommitmentTransaction::new(
@@ -3133,10 +3206,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			self.counterparty_funding_pubkey()
 		);

-		let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number - 1, &self.secp_ctx);
 		self.holder_signer.validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
-			.map_err(|_| (None, ChannelError::Close("Failed to validate our commitment".to_owned())))?;
-		let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 1);
+			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
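// An illustrative sketch (not part of the upstream diff) of the per-HTLC signature
// check performed in the loop above, reduced to the bare secp256k1 calls. The
// sighash bytes and keys here are placeholders; in the real code they come from
// the built HTLC transaction and the derived `TxCreationKeys`.
use bitcoin::secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1};

fn check_htlc_sig(sighash: &[u8; 32], sig: &Signature, key: &PublicKey) -> bool {
    let secp = Secp256k1::verification_only();
    let msg = Message::from_slice(sighash).expect("sighash is 32 bytes");
    secp.verify_ecdsa(&msg, sig, key).is_ok()
}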
 		// Update state now that we've passed all the can-fail calls...

 		let mut need_commitment = false;
 		if let &mut Some((_, ref mut update_state)) = &mut self.pending_update_fee {
 			if *update_state == FeeUpdateState::RemoteAnnounced {
 				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
 				need_commitment = true;
 			}
 		}

-		self.latest_monitor_update_id += 1;
-		let mut monitor_update = ChannelMonitorUpdate {
-			update_id: self.latest_monitor_update_id,
-			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
-				commitment_tx: holder_commitment_tx,
-				htlc_outputs: htlcs_and_sigs
-			}]
-		};
-
 		for htlc in self.pending_inbound_htlcs.iter_mut() {
 			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
 				Some(forward_info.clone())
@@ -3167,6 +3229,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				need_commitment = true;
 			}
 		}
+		let mut claimed_htlcs = Vec::new();
 		for htlc in self.pending_outbound_htlcs.iter_mut() {
 			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
 				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
@@ -3174,14 +3237,34 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				// Grab the preimage, if it exists, instead of cloning
 				let mut reason = OutboundHTLCOutcome::Success(None);
 				mem::swap(outcome, &mut reason);
+				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
+					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
+					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
+					// have a `Success(None)` reason. In this case we could forget some HTLC
+					// claims, but such an upgrade is unlikely and including claimed HTLCs here
+					// fixes a bug which the user was exposed to on 0.0.104 when they started the
+					// claim anyway.
+					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
+				}
 				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
 				need_commitment = true;
 			}
 		}

+		self.latest_monitor_update_id += 1;
+		let mut monitor_update = ChannelMonitorUpdate {
+			update_id: self.latest_monitor_update_id,
+			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
+				commitment_tx: holder_commitment_tx,
+				htlc_outputs: htlcs_and_sigs,
+				claimed_htlcs,
+				nondust_htlc_sources,
+			}]
+		};
+
 		self.cur_holder_commitment_transaction_number -= 1;
 		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
-		// send_commitment_no_status_check() next which will reset this to RAAFirst.
+		// build_commitment_no_status_check() next which will reset this to RAAFirst.
 		self.resend_order = RAACommitmentOrder::CommitmentFirst;

 		if (self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
@@ -3193,52 +3276,50 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			// the corresponding HTLC status updates so that get_last_commitment_update
 			// includes the right HTLCs.
 			self.monitor_pending_commitment_signed = true;
-			let (_, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
-			// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+			let mut additional_update = self.build_commitment_no_status_check(logger);
+			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
 			// strictly increasing by one, so decrement it here.
 			self.latest_monitor_update_id = monitor_update.update_id;
 			monitor_update.updates.append(&mut additional_update.updates);

 			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
 				log_bytes!(self.channel_id));
-			return Err((Some(monitor_update), ChannelError::Ignore("Previous monitor update failure prevented generation of RAA".to_owned())));
+			self.pending_monitor_updates.push(monitor_update);
+			return Ok(self.pending_monitor_updates.last().unwrap());
 		}

-		let commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
+		let need_commitment_signed = if need_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
 			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
 			// we'll send one right away when we get the revoke_and_ack when we
 			// free_holding_cell_htlcs().
-			let (msg, mut additional_update) = self.send_commitment_no_status_check(logger).map_err(|e| (None, e))?;
-			// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+			let mut additional_update = self.build_commitment_no_status_check(logger);
+			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
 			// strictly increasing by one, so decrement it here.
 			self.latest_monitor_update_id = monitor_update.update_id;
 			monitor_update.updates.append(&mut additional_update.updates);
-			Some(msg)
-		} else { None };
+			true
+		} else { false };

 		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
-			log_bytes!(self.channel_id()), if commitment_signed.is_some() { " our own commitment_signed and" } else { "" });
-
-		Ok((msgs::RevokeAndACK {
-			channel_id: self.channel_id,
-			per_commitment_secret,
-			next_per_commitment_point,
-		}, commitment_signed, monitor_update))
+			log_bytes!(self.channel_id()), if need_commitment_signed { " our own commitment_signed and" } else { "" });
+		self.pending_monitor_updates.push(monitor_update);
+		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
+		return Ok(self.pending_monitor_updates.last().unwrap());
 	}

 	/// Public version of the below, checking relevant preconditions first.
 	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
 	/// returns `(None, Vec::new())`.
-	pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+	pub fn maybe_free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
 		if self.channel_state >= ChannelState::ChannelReady as u32 &&
 		   (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
 			self.free_holding_cell_htlcs(logger)
-		} else { Ok((None, Vec::new())) }
+		} else { (None, Vec::new()) }
 	}
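// A simplified sketch (not part of the upstream diff) of why the new
// `&ChannelMonitorUpdate` return type in the functions above works: the update is
// pushed onto `pending_monitor_updates` and the caller only receives a borrow of
// the queued copy, so the `Channel` retains ownership until the monitor update
// completes. `QueuedUpdate`/`PendingUpdates` are hypothetical stand-ins.
struct QueuedUpdate { update_id: u64 }
struct PendingUpdates { pending: Vec<QueuedUpdate> }

impl PendingUpdates {
    fn queue(&mut self, update: QueuedUpdate) -> &QueuedUpdate {
        self.pending.push(update);
        // The returned borrow is tied to `self`, so the caller cannot outlive
        // the queue's copy of the update.
        self.pending.last().unwrap()
    }
}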
 	/// Frees any pending commitment updates in the holding cell, generating the relevant messages
 	/// for our counterparty.
-	fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> Result<(Option<(msgs::CommitmentUpdate, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>), ChannelError> where L::Target: Logger {
+	fn free_holding_cell_htlcs<L: Deref>(&mut self, logger: &L) -> (Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>) where L::Target: Logger {
 		assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
 		if self.holding_cell_htlc_updates.len() != 0 || self.holding_cell_update_fee.is_some() {
 			log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.holding_cell_htlc_updates.len(),
@@ -3319,7 +3400,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				}
 			}
 			if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() {
-				return Ok((None, htlcs_to_fail));
+				return (None, htlcs_to_fail);
 			}
 			let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() {
 				self.send_update_fee(feerate, false, logger)
@@ -3327,8 +3408,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				None
 			};

-			let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
-			// send_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
+			let mut additional_update = self.build_commitment_no_status_check(logger);
+			// build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
 			// but we want them to be strictly increasing by one, so reset it here.
 			self.latest_monitor_update_id = monitor_update.update_id;
 			monitor_update.updates.append(&mut additional_update.updates);
@@ -3337,16 +3418,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				log_bytes!(self.channel_id()), if update_fee.is_some() { "a fee update, " } else { "" },
 				update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len());

-			Ok((Some((msgs::CommitmentUpdate {
-				update_add_htlcs,
-				update_fulfill_htlcs,
-				update_fail_htlcs,
-				update_fail_malformed_htlcs: Vec::new(),
-				update_fee,
-				commitment_signed,
-			}, monitor_update)), htlcs_to_fail))
+			self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+			self.pending_monitor_updates.push(monitor_update);
+			(Some(self.pending_monitor_updates.last().unwrap()), htlcs_to_fail)
 		} else {
-			Ok((None, Vec::new()))
+			(None, Vec::new())
 		}
 	}

@@ -3355,7 +3431,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
 	/// generating an appropriate error *after* the channel state has been updated based on the
 	/// revoke_and_ack message.
-	pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<RAAUpdates, ChannelError>
+	pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, &ChannelMonitorUpdate), ChannelError>
 		where L::Target: Logger,
 	{
 		if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
@@ -3542,8 +3618,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 				// When the monitor updating is restored we'll call get_last_commitment_update(),
 				// which does not update state, but we're definitely now awaiting a remote revoke
 				// before we can step forward any more, so set it here.
-				let (_, mut additional_update) = self.send_commitment_no_status_check(logger)?;
-				// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+				let mut additional_update = self.build_commitment_no_status_check(logger);
+				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
 				// strictly increasing by one, so decrement it here.
 				self.latest_monitor_update_id = monitor_update.update_id;
 				monitor_update.updates.append(&mut additional_update.updates);
@@ -3552,71 +3628,41 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			self.monitor_pending_failures.append(&mut revoked_htlcs);
 			self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
 			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
-			return Ok(RAAUpdates {
-				commitment_update: None, finalized_claimed_htlcs: Vec::new(),
-				accepted_htlcs: Vec::new(), failed_htlcs: Vec::new(),
-				monitor_update,
-				holding_cell_failed_htlcs: Vec::new()
-			});
+			self.pending_monitor_updates.push(monitor_update);
+			return Ok((Vec::new(), self.pending_monitor_updates.last().unwrap()));
 		}

-		match self.free_holding_cell_htlcs(logger)? {
-			(Some((mut commitment_update, mut additional_update)), htlcs_to_fail) => {
-				commitment_update.update_fail_htlcs.reserve(update_fail_htlcs.len());
-				for fail_msg in update_fail_htlcs.drain(..) {
-					commitment_update.update_fail_htlcs.push(fail_msg);
-				}
-				commitment_update.update_fail_malformed_htlcs.reserve(update_fail_malformed_htlcs.len());
-				for fail_msg in update_fail_malformed_htlcs.drain(..) {
-					commitment_update.update_fail_malformed_htlcs.push(fail_msg);
-				}
-
+		match self.free_holding_cell_htlcs(logger) {
+			(Some(_), htlcs_to_fail) => {
+				let mut additional_update = self.pending_monitor_updates.pop().unwrap();
 				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
 				// strictly increasing by one, so decrement it here.
 				self.latest_monitor_update_id = monitor_update.update_id;
 				monitor_update.updates.append(&mut additional_update.updates);

-				Ok(RAAUpdates {
-					commitment_update: Some(commitment_update),
-					finalized_claimed_htlcs,
-					accepted_htlcs: to_forward_infos,
-					failed_htlcs: revoked_htlcs,
-					monitor_update,
-					holding_cell_failed_htlcs: htlcs_to_fail
-				})
+				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+				self.pending_monitor_updates.push(monitor_update);
+				Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
 			},
 			(None, htlcs_to_fail) => {
 				if require_commitment {
-					let (commitment_signed, mut additional_update) = self.send_commitment_no_status_check(logger)?;
+					let mut additional_update = self.build_commitment_no_status_check(logger);

-					// send_commitment_no_status_check may bump latest_monitor_id but we want them to be
+					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
 					// strictly increasing by one, so decrement it here.
 					self.latest_monitor_update_id = monitor_update.update_id;
 					monitor_update.updates.append(&mut additional_update.updates);
 					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
 						log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
-					Ok(RAAUpdates {
-						commitment_update: Some(msgs::CommitmentUpdate {
-							update_add_htlcs: Vec::new(),
-							update_fulfill_htlcs: Vec::new(),
-							update_fail_htlcs,
-							update_fail_malformed_htlcs,
-							update_fee: None,
-							commitment_signed
-						}),
-						finalized_claimed_htlcs,
-						accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
-						monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
-					})
+					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+					self.pending_monitor_updates.push(monitor_update);
+					Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
 				} else {
 					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
-					Ok(RAAUpdates {
-						commitment_update: None,
-						finalized_claimed_htlcs,
-						accepted_htlcs: to_forward_infos, failed_htlcs: revoked_htlcs,
-						monitor_update, holding_cell_failed_htlcs: htlcs_to_fail
-					})
+					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
+					self.pending_monitor_updates.push(monitor_update);
+					Ok((htlcs_to_fail, self.pending_monitor_updates.last().unwrap()))
 				}
 			}
 		}
 	}
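// A simplified sketch (not part of the upstream diff) of what freeing the holding
// cell on revoke_and_ack amounts to: updates queued while awaiting the peer's RAA
// are drained and replayed, and adds that can no longer be sent are returned so
// the caller can fail them backwards (the `htlcs_to_fail` vector above). All types
// here are hypothetical stand-ins for the real holding-cell machinery.
enum HeldHTLCUpdate {
    Add { amount_msat: u64, payment_hash: [u8; 32] },
    Fulfill { htlc_id: u64 },
    Fail { htlc_id: u64 },
}

fn drain_holding_cell(
    cell: &mut Vec<HeldHTLCUpdate>, can_send_value: impl Fn(u64) -> bool,
) -> Vec<[u8; 32]> {
    let mut failed_adds = Vec::new();
    for upd in cell.drain(..) {
        match upd {
            HeldHTLCUpdate::Add { amount_msat, payment_hash } => {
                // Adds can fail value/feerate checks at flush time; collect them
                // so the caller can fail the HTLC backwards.
                if !can_send_value(amount_msat) { failed_adds.push(payment_hash); }
            },
            // Fulfills and fails are infallible at this point and are simply replayed.
            HeldHTLCUpdate::Fulfill { .. } | HeldHTLCUpdate::Fail { .. } => {},
        }
    }
    failed_adds
}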
@@ -3768,15 +3814,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	}

 	/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
-	/// This must be called immediately after the [`chain::Watch`] call which returned
-	/// [`ChannelMonitorUpdateStatus::InProgress`].
+	/// This must be called before we return the [`ChannelMonitorUpdate`] back to the
+	/// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
+	/// update completes (potentially immediately).
 	/// The messages which were generated with the monitor update must *not* have been sent to the
 	/// remote end, and must instead have been dropped. They will be regenerated when
 	/// [`Self::monitor_updating_restored`] is called.
 	///
+	/// [`ChannelManager`]: super::channelmanager::ChannelManager
 	/// [`chain::Watch`]: crate::chain::Watch
 	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
-	pub fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
+	fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
 		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
 		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
 		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
@@ -3803,6 +3851,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 	{
 		assert_eq!(self.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
 		self.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
+		self.pending_monitor_updates.clear();

 		// If we're past (or at) the FundingSent stage on an outbound channel, try to
 		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
@@ -3913,6 +3962,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			channel_id: self.channel_id,
 			per_commitment_secret,
 			next_per_commitment_point,
+			#[cfg(taproot)]
+			next_local_nonce: None,
 		}
 	}

@@ -4008,32 +4059,27 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		}

 		if msg.next_remote_commitment_number > 0 {
-			match msg.data_loss_protect {
-				OptionalField::Present(ref data_loss) => {
-					let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
-					let given_secret = SecretKey::from_slice(&data_loss.your_last_per_commitment_secret)
-						.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
-					if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
-						return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+			let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
+			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
+				.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
+			if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
+				return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
+			}
+			if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
+				macro_rules! log_and_panic {
+					($err_msg: expr) => {
+						log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+						panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
 					}
-					if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
-						macro_rules! log_and_panic {
-							($err_msg: expr) => {
-								log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
-								panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
-							}
-						}
-						log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
-							This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
-							More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
-							If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
-							ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
-							ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
-							Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
-							See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
-					}
-				},
-				OptionalField::Absent => {}
+				}
+				log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+					This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+					More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+					If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+					ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+					ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+					Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+					See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
 			}
 		}

@@ -4280,7 +4326,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 	pub fn shutdown<SP: Deref>(
 		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
-	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+	) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
 	where SP::Target: SignerProvider
 	{
 		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4320,7 +4366,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			Some(_) => false,
 			None => {
 				assert!(send_shutdown);
-				let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
+					Ok(scriptpubkey) => scriptpubkey,
+					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
+				};
 				if !shutdown_scriptpubkey.is_compatible(their_features) {
 					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
 				}
@@ -4336,12 +4385,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 		let monitor_update = if update_shutdown_script {
 			self.latest_monitor_update_id += 1;
-			Some(ChannelMonitorUpdate {
+			let monitor_update = ChannelMonitorUpdate {
 				update_id: self.latest_monitor_update_id,
 				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
 					scriptpubkey: self.get_closing_scriptpubkey(),
 				}],
-			})
+			};
+			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+			self.pending_monitor_updates.push(monitor_update);
+			Some(self.pending_monitor_updates.last().unwrap())
 		} else { None };

 		let shutdown = if send_shutdown { Some(msgs::Shutdown {
@@ -4552,6 +4604,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		self.channel_id
 	}

+	// Return the `temporary_channel_id` used during channel establishment.
+	//
+	// Will return `None` for channels created prior to LDK version 0.0.115.
+	pub fn temporary_channel_id(&self) -> Option<[u8; 32]> {
+		self.temporary_channel_id
+	}
+
 	pub fn minimum_depth(&self) -> Option<u32> {
 		self.minimum_depth
 	}
@@ -4696,6 +4755,21 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		self.prev_config.map(|prev_config| prev_config.0)
 	}

+	// Checks whether we should emit a `ChannelPending` event.
+	pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
+		self.is_funding_initiated() && !self.channel_pending_event_emitted
+	}
+
+	// Returns whether we already emitted a `ChannelPending` event.
+	pub(crate) fn channel_pending_event_emitted(&self) -> bool {
+		self.channel_pending_event_emitted
+	}
+
+	// Remembers that we already emitted a `ChannelPending` event.
+	pub(crate) fn set_channel_pending_event_emitted(&mut self) {
+		self.channel_pending_event_emitted = true;
+	}
+
 	// Checks whether we should emit a `ChannelReady` event.
 	pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
 		self.is_usable() && !self.channel_ready_event_emitted
@@ -4779,7 +4853,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		})
 	}

-	pub fn get_feerate(&self) -> u32 {
+	pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
 		self.feerate_per_kw
 	}

@@ -4891,6 +4965,10 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		(self.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
 	}

+	pub fn get_next_monitor_update(&self) -> Option<&ChannelMonitorUpdate> {
+		self.pending_monitor_updates.first()
+	}
+
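// A minimal sketch (not part of the upstream diff) of the emit-once pattern the
// `channel_pending_event_emitted` / `channel_ready_event_emitted` flags above
// implement: a check-and-set so each user-facing event is surfaced at most once
// per channel. `EventFlags` is a hypothetical stand-in.
struct EventFlags { channel_pending_event_emitted: bool }

impl EventFlags {
    fn maybe_emit_pending<F: FnOnce()>(&mut self, funding_initiated: bool, emit: F) {
        if funding_initiated && !self.channel_pending_event_emitted {
            // Mark first, then emit, so the event cannot be duplicated.
            self.channel_pending_event_emitted = true;
            emit();
        }
    }
}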
 	/// Returns true if funding_created was sent/received.
 	pub fn is_funding_initiated(&self) -> bool {
 		self.channel_state >= ChannelState::FundingSent as u32
 	}
@@ -5251,7 +5329,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			htlc_minimum_msat: self.holder_htlc_minimum_msat,
 			feerate_per_kw: self.feerate_per_kw as u32,
 			to_self_delay: self.get_holder_selected_contest_delay(),
-			max_accepted_htlcs: OUR_MAX_HTLCS,
+			max_accepted_htlcs: self.holder_max_accepted_htlcs,
 			funding_pubkey: keys.funding_pubkey,
 			revocation_basepoint: keys.revocation_basepoint,
 			payment_point: keys.payment_point,
@@ -5259,7 +5337,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			htlc_basepoint: keys.htlc_basepoint,
 			first_per_commitment_point,
 			channel_flags: if self.config.announced_channel {1} else {0},
-			shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
+			shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey {
 				Some(script) => script.clone().into_inner(),
 				None => Builder::new().into_script(),
 			}),
@@ -5318,18 +5396,20 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			htlc_minimum_msat: self.holder_htlc_minimum_msat,
 			minimum_depth: self.minimum_depth.unwrap(),
 			to_self_delay: self.get_holder_selected_contest_delay(),
-			max_accepted_htlcs: OUR_MAX_HTLCS,
+			max_accepted_htlcs: self.holder_max_accepted_htlcs,
 			funding_pubkey: keys.funding_pubkey,
 			revocation_basepoint: keys.revocation_basepoint,
 			payment_point: keys.payment_point,
 			delayed_payment_basepoint: keys.delayed_payment_basepoint,
 			htlc_basepoint: keys.htlc_basepoint,
 			first_per_commitment_point,
-			shutdown_scriptpubkey: OptionalField::Present(match &self.shutdown_scriptpubkey {
+			shutdown_scriptpubkey: Some(match &self.shutdown_scriptpubkey {
 				Some(script) => script.clone().into_inner(),
 				None => Builder::new().into_script(),
 			}),
 			channel_type: Some(self.channel_type.clone()),
+			#[cfg(taproot)]
+			next_local_nonce: None,
 		}
 	}

@@ -5394,7 +5474,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			temporary_channel_id,
 			funding_txid: funding_txo.txid,
 			funding_output_index: funding_txo.index,
-			signature
+			signature,
+			#[cfg(taproot)]
+			partial_signature_with_nonce: None,
+			#[cfg(taproot)]
+			next_local_nonce: None,
 		})
 	}

@@ -5581,19 +5665,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		// valid, and valid in fuzzing mode's arbitrary validity criteria:
 		let mut pk = [2; 33]; pk[1] = 0xff;
 		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
-		let data_loss_protect = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+		let remote_last_secret = if self.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
 			let remote_last_secret = self.commitment_secrets.get_secret(self.cur_counterparty_commitment_transaction_number + 2).unwrap();
 			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), log_bytes!(self.channel_id()));
-			OptionalField::Present(DataLossProtect {
-				your_last_per_commitment_secret: remote_last_secret,
-				my_current_per_commitment_point: dummy_pubkey
-			})
+			remote_last_secret
 		} else {
 			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", log_bytes!(self.channel_id()));
-			OptionalField::Present(DataLossProtect {
-				your_last_per_commitment_secret: [0;32],
-				my_current_per_commitment_point: dummy_pubkey,
-			})
+			[0;32]
 		};
 		msgs::ChannelReestablish {
 			channel_id: self.channel_id(),
@@ -5615,7 +5693,11 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
 			// overflow here.
 			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_counterparty_commitment_transaction_number - 1,
-			data_loss_protect,
+			your_last_per_commitment_secret: remote_last_secret,
+			my_current_per_commitment_point: dummy_pubkey,
+			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction construction but have not received `tx_signatures`
+			// we MUST set `next_funding_txid` to the txid of that interactive transaction, else we MUST NOT set it.
+			next_funding_txid: None,
 		}
 	}

@@ -5791,8 +5873,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		Ok(Some(res))
 	}

-	/// Only fails in case of bad keys
-	fn send_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger {
+	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
 		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
 		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
 		// fail to generate this, we still are at least at a position where upgrading their status
@@ -5825,15 +5906,9 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 		}
 		self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

-		let (res, counterparty_commitment_txid, htlcs) = match self.send_commitment_no_state_update(logger) {
-			Ok((res, (counterparty_commitment_tx, mut htlcs))) => {
-				// Update state now that we've passed all the can-fail calls...
-				let htlcs_no_ref: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
-					htlcs.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
-				(res, counterparty_commitment_tx, htlcs_no_ref)
-			},
-			Err(e) => return Err(e),
-		};
+		let (counterparty_commitment_txid, mut htlcs_ref) = self.build_commitment_no_state_update(logger);
+		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
+			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

 		if self.announcement_sigs_state == AnnouncementSigsState::MessageSent {
 			self.announcement_sigs_state = AnnouncementSigsState::Committed;
@@ -5850,16 +5925,13 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			}]
 		};
 		self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
-		Ok((res, monitor_update))
+		monitor_update
 	}

-	/// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
-	/// when we shouldn't change HTLC/channel state.
-	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L) -> (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
 		let counterparty_keys = self.build_remote_transaction_keys();
 		let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
 		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
-		let (signature, htlc_signatures);

 		#[cfg(any(test, fuzzing))]
 		{
@@ -5879,6 +5951,21 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			}
 		}

+		(counterparty_commitment_txid, commitment_stats.htlcs_included)
+	}
+
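// An illustrative sketch (not part of the upstream diff) of the data-loss-protect
// check that the recipient of the `channel_reestablish` built above performs: the
// peer proves knowledge of our old state by sending `your_last_per_commitment_secret`,
// which is verified by lifting the secret to a point and comparing it against the
// per-commitment point expected at that commitment height.
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};

fn verify_last_secret(given_secret: &[u8; 32], expected_point: &PublicKey) -> Result<(), ()> {
    let secp = Secp256k1::new();
    // A garbage secret fails to parse; a wrong-height secret fails the comparison.
    let sk = SecretKey::from_slice(given_secret).map_err(|_| ())?;
    if PublicKey::from_secret_key(&secp, &sk) == *expected_point { Ok(()) } else { Err(()) }
}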
+	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
+	/// generation when we shouldn't change HTLC/channel state.
+	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+		// Get the fee tests from `build_commitment_no_state_update`
+		#[cfg(any(test, fuzzing))]
+		self.build_commitment_no_state_update(logger);
+
+		let counterparty_keys = self.build_remote_transaction_keys();
+		let commitment_stats = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
+		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
+		let (signature, htlc_signatures);
+
 		{
 			let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
 			for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
@@ -5908,19 +5995,25 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
 			channel_id: self.channel_id,
 			signature,
 			htlc_signatures,
+			#[cfg(taproot)]
+			partial_signature_with_nonce: None,
 		}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
 	}

-	/// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction
-	/// to send to the remote peer in one go.
+	/// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
+	/// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
 	///
 	/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
-	/// [`Self::send_htlc`] and [`Self::send_commitment_no_state_update`] for more info.
-	pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<(msgs::UpdateAddHTLC, msgs::CommitmentSigned, ChannelMonitorUpdate)>, ChannelError> where L::Target: Logger {
-		match self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger)? {
-			Some(update_add_htlc) => {
-				let (commitment_signed, monitor_update) = self.send_commitment_no_status_check(logger)?;
-				Ok(Some((update_add_htlc, commitment_signed, monitor_update)))
+	/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
+	pub fn send_htlc_and_commit<L: Deref>(&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, logger: &L) -> Result<Option<&ChannelMonitorUpdate>, ChannelError> where L::Target: Logger {
+		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, false, logger);
+		if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
+		match send_res? {
+			Some(_) => {
+				let monitor_update = self.build_commitment_no_status_check(logger);
+				self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
+				self.pending_monitor_updates.push(monitor_update);
+				Ok(Some(self.pending_monitor_updates.last().unwrap()))
 			},
 			None => Ok(None)
 		}
 	}
@@ -5946,8 +6039,12 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {

 	/// Begins the shutdown process, getting a message for the remote peer and returning all
 	/// holding cell HTLCs for payment failure.
-	pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
-		-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+	///
+	/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
+	/// [`ChannelMonitorUpdate`] will be returned.
@@ -5946,8 +6039,12 @@ impl Channel {
 	/// Begins the shutdown process, getting a message for the remote peer and returning all
 	/// holding cell HTLCs for payment failure.
-	pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option<u32>)
-	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
+	///
+	/// May jump to the channel being fully shut down (see [`Self::is_shutdown`]), in which case
+	/// no [`ChannelMonitorUpdate`] will be returned.
+	pub fn get_shutdown<SP: Deref>(&mut self, signer_provider: &SP, their_features: &InitFeatures,
+		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
+	-> Result<(msgs::Shutdown, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
 	where SP::Target: SignerProvider {
 		for htlc in self.pending_outbound_htlcs.iter() {
 			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
@@ -5962,21 +6059,42 @@ impl Channel {
 				return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
 			}
 		}
+		if self.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
+			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
+		}
 		assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
 		if self.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
 			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
 		}
+
+		// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
+		// script is set; we just force-close and call it a day.
+		let mut chan_closed = false;
+		if self.channel_state < ChannelState::FundingSent as u32 {
+			chan_closed = true;
+		}
+
 		let update_shutdown_script = match self.shutdown_scriptpubkey {
 			Some(_) => false,
-			None => {
-				let shutdown_scriptpubkey = signer_provider.get_shutdown_scriptpubkey();
+			None if !chan_closed => {
+				// Use the override shutdown script if one was provided...
+				let shutdown_scriptpubkey = match override_shutdown_script {
+					Some(script) => script,
+					None => {
+						// ...otherwise, use the shutdown scriptpubkey provided by the signer.
+						match signer_provider.get_shutdown_scriptpubkey() {
+							Ok(scriptpubkey) => scriptpubkey,
+							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
+						}
+					},
+				};
 				if !shutdown_scriptpubkey.is_compatible(their_features) {
 					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
 				}
 				self.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
 				true
 			},
+			None => false,
 		};

 		// From here on out, we may not fail!
@@ -5990,12 +6108,15 @@ impl Channel {
 		let monitor_update = if update_shutdown_script {
 			self.latest_monitor_update_id += 1;
-			Some(ChannelMonitorUpdate {
+			let monitor_update = ChannelMonitorUpdate {
 				update_id: self.latest_monitor_update_id,
 				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
 					scriptpubkey: self.get_closing_scriptpubkey(),
 				}],
-			})
+			};
+			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+			self.pending_monitor_updates.push(monitor_update);
+			Some(self.pending_monitor_updates.last().unwrap())
 		} else { None };
 		let shutdown = msgs::Shutdown {
 			channel_id: self.channel_id,
@@ -6016,6 +6137,9 @@ impl Channel {
 			}
 		});

+		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
+			"we can't both complete shutdown and return a monitor update");
+
 		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
 	}

@@ -6052,7 +6176,7 @@ impl Channel {
 		// monitor update to the user, even if we return one).
 		// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
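// ---- Editor's aside (not part of the diff) ----
// The change just below stops bumping `latest_monitor_update_id` one last time
// on force-close and instead jumps it to the `CLOSED_CHANNEL_UPDATE_ID`
// sentinel (defined as u64::MAX in channelmonitor.rs), so a persisted update
// can be recognized as the terminal one for a closed channel regardless of how
// many updates preceded it. The scheme in miniature:
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

struct UpdateIds { latest: u64 }

impl UpdateIds {
	fn next(&mut self) -> u64 { self.latest += 1; self.latest }
	fn force_close(&mut self) -> u64 { self.latest = CLOSED_CHANNEL_UPDATE_ID; self.latest }
	fn is_terminal(update_id: u64) -> bool { update_id == CLOSED_CHANNEL_UPDATE_ID }
}
// ---- end editor's aside ----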
if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 { - self.latest_monitor_update_id += 1; + self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; Some((funding_txo, ChannelMonitorUpdate { update_id: self.latest_monitor_update_id, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], @@ -6094,8 +6218,8 @@ impl Writeable for ChannelUpdateStatus { // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1. match self { ChannelUpdateStatus::Enabled => 0u8.write(writer)?, - ChannelUpdateStatus::DisabledStaged => 0u8.write(writer)?, - ChannelUpdateStatus::EnabledStaged => 1u8.write(writer)?, + ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?, + ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?, ChannelUpdateStatus::Disabled => 1u8.write(writer)?, } Ok(()) @@ -6391,6 +6515,7 @@ impl Writeable for Channel { if self.holder_max_htlc_value_in_flight_msat != Self::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis, &old_max_in_flight_percent_config) { Some(self.holder_max_htlc_value_in_flight_msat) } else { None }; + let channel_pending_event_emitted = Some(self.channel_pending_event_emitted); let channel_ready_event_emitted = Some(self.channel_ready_event_emitted); // `user_id` used to be a single u64 value. In order to remain backwards compatible with @@ -6398,6 +6523,8 @@ impl Writeable for Channel { // we write the high bytes as an option here. let user_id_high_opt = Some((self.user_id >> 64) as u64); + let holder_max_accepted_htlcs = if self.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.holder_max_accepted_htlcs) }; + write_tlv_fields!(writer, { (0, self.announcement_sigs, option), // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a @@ -6423,6 +6550,9 @@ impl Writeable for Channel { (23, channel_ready_event_emitted, option), (25, user_id_high_opt, option), (27, self.channel_keys_id, required), + (28, holder_max_accepted_htlcs, option), + (29, self.temporary_channel_id, option), + (31, channel_pending_event_emitted, option), }); Ok(()) @@ -6489,7 +6619,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let value_to_self_msat = Readable::read(reader)?; let pending_inbound_htlc_count: u64 = Readable::read(reader)?; - let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize)); + + let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize)); for _ in 0..pending_inbound_htlc_count { pending_inbound_htlcs.push(InboundHTLCOutput { htlc_id: Readable::read(reader)?, @@ -6507,7 +6638,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch } let pending_outbound_htlc_count: u64 = Readable::read(reader)?; - let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, OUR_MAX_HTLCS as usize)); + let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize)); for _ in 0..pending_outbound_htlc_count { pending_outbound_htlcs.push(OutboundHTLCOutput { htlc_id: Readable::read(reader)?, @@ -6536,7 +6667,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch } let holding_cell_htlc_update_count: u64 = Readable::read(reader)?; - let mut 
holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, OUR_MAX_HTLCS as usize*2)); + let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2)); for _ in 0..holding_cell_htlc_update_count { holding_cell_htlc_updates.push(match ::read(reader)? { 0 => HTLCUpdateAwaitingACK::AddHTLC { @@ -6569,13 +6700,13 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let monitor_pending_commitment_signed = Readable::read(reader)?; let monitor_pending_forwards_count: u64 = Readable::read(reader)?; - let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize)); + let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize)); for _ in 0..monitor_pending_forwards_count { monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?)); } let monitor_pending_failures_count: u64 = Readable::read(reader)?; - let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, OUR_MAX_HTLCS as usize)); + let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize)); for _ in 0..monitor_pending_failures_count { monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)); } @@ -6690,10 +6821,13 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent); let mut latest_inbound_scid_alias = None; let mut outbound_scid_alias = None; + let mut channel_pending_event_emitted = None; let mut channel_ready_event_emitted = None; let mut user_id_high_opt: Option = None; let mut channel_keys_id: Option<[u8; 32]> = None; + let mut temporary_channel_id: Option<[u8; 32]> = None; + let mut holder_max_accepted_htlcs: Option = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -6714,6 +6848,9 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch (23, channel_ready_event_emitted, option), (25, user_id_high_opt, option), (27, channel_keys_id, option), + (28, holder_max_accepted_htlcs, option), + (29, temporary_channel_id, option), + (31, channel_pending_event_emitted, option), }); let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id { @@ -6766,6 +6903,8 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch // separate u64 values. 
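// ---- Editor's aside (not part of the diff) ----
// The line below reassembles the u128 `user_id` from the legacy u64 field plus
// the optional high-bytes TLV, per the backwards-compatibility comment above.
// The round trip of that scheme, standalone:
fn split_user_id(user_id: u128) -> (u64, Option<u64>) {
	(user_id as u64, Some((user_id >> 64) as u64))
}
fn join_user_id(user_id_low: u64, user_id_high_opt: Option<u64>) -> u128 {
	user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64)
}
#[test]
fn user_id_high_low_roundtrip() {
	let id: u128 = (42u128 << 64) | 7;
	let (low, high) = split_user_id(id);
	assert_eq!(join_user_id(low, high), id);
	// Serializers that only knew a u64 user_id never wrote the high half:
	assert_eq!(join_user_id(7, None), 7u128);
}
// ---- end editor's aside ----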
 		let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);

+		let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
+
 		Ok(Channel {
 			user_id,

@@ -6778,6 +6917,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			inbound_handshake_limits_override: None,

 			channel_id,
+			temporary_channel_id,
 			channel_state,
 			announcement_sigs_state: announcement_sigs_state.unwrap(),
 			secp_ctx,
@@ -6793,6 +6933,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			cur_counterparty_commitment_transaction_number,
 			value_to_self_msat,

+			holder_max_accepted_htlcs,
 			pending_inbound_htlcs,
 			pending_outbound_htlcs,
 			holding_cell_htlc_updates,
@@ -6870,6 +7011,7 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch
 			// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if it's missing
 			outbound_scid_alias: outbound_scid_alias.unwrap_or(0),

+			channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
 			channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),

 			#[cfg(any(test, fuzzing))]
 			historical_inbound_htlc_fulfills,

 			channel_type: channel_type.unwrap(),
 			channel_keys_id,
+
+			pending_monitor_updates: Vec::new(),
 		})
 	}
 }

@@ -6897,14 +7041,15 @@ mod tests {
 	use crate::ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
 	use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
 	use crate::ln::features::ChannelTypeFeatures;
-	use crate::ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
+	use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
 	use crate::ln::script::ShutdownScript;
 	use crate::ln::chan_utils;
 	use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
 	use crate::chain::BestBlock;
 	use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
-	use crate::chain::keysinterface::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
+	use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
 	use crate::chain::transaction::OutPoint;
+	use crate::routing::router::Path;
 	use crate::util::config::UserConfig;
 	use crate::util::enforcing_trait_impls::EnforcingSigner;
 	use crate::util::errors::APIError;
@@ -6967,17 +7112,17 @@ mod tests {
 		fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }

-		fn get_destination_script(&self) -> Script {
+		fn get_destination_script(&self) -> Result<Script, ()> {
 			let secp_ctx = Secp256k1::signing_only();
 			let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
 			let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
-			Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script()
+			Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
 		}

-		fn get_shutdown_scriptpubkey(&self) -> ShutdownScript {
+		fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
 			let secp_ctx = Secp256k1::signing_only();
 			let
channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(); - ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)) + Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key))) } } @@ -7082,12 +7227,10 @@ mod tests { cltv_expiry: 200000000, state: OutboundHTLCState::Committed, source: HTLCSource::OutboundRoute { - path: Vec::new(), + path: Path { hops: Vec::new(), blinded_tail: None }, session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(), first_hop_htlc_msat: 548, payment_id: PaymentId([42; 32]), - payment_secret: None, - payment_params: None, } }); @@ -7161,7 +7304,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let seed = [42; 32]; let network = Network::Testnet; - let best_block = BestBlock::from_genesis(network); + let best_block = BestBlock::from_network(network); let chain_hash = best_block.block_hash(); let keys_provider = test_utils::TestKeysInterface::new(&seed, network); @@ -7188,7 +7331,7 @@ mod tests { }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap(); - let (funding_signed_msg, _, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); + let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, best_block, &&keys_provider, &&logger).unwrap(); // Node B --> Node A: funding signed let _ = node_a_chan.funding_signed(&funding_signed_msg, best_block, &&keys_provider, &&logger); @@ -7199,12 +7342,7 @@ mod tests { let msg = node_b_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number - match msg.data_loss_protect { - OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. }) => { - assert_eq!(your_last_per_commitment_secret, [0; 32]); - }, - _ => panic!() - } + assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); // Check that the commitment point in Node A's channel_reestablish message // is sane. @@ -7212,12 +7350,7 @@ mod tests { let msg = node_a_chan.get_channel_reestablish(&&logger); assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number - match msg.data_loss_protect { - OptionalField::Present(DataLossProtect { your_last_per_commitment_secret, .. 
}) => { - assert_eq!(your_last_per_commitment_secret, [0; 32]); - }, - _ => panic!() - } + assert_eq!(msg.your_last_per_commitment_secret, [0; 32]); } #[test] @@ -7399,7 +7532,7 @@ mod tests { } } - #[cfg(not(feature = "grind_signatures"))] + #[cfg(feature = "_test_vectors")] #[test] fn outbound_commitment_test() { use bitcoin::util::sighash; @@ -7408,7 +7541,7 @@ mod tests { use bitcoin::hashes::hex::FromHex; use bitcoin::hash_types::Txid; use bitcoin::secp256k1::Message; - use crate::chain::keysinterface::EcdsaChannelSigner; + use crate::sign::EcdsaChannelSigner; use crate::ln::PaymentPreimage; use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys}; use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; @@ -7432,6 +7565,7 @@ mod tests { [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff], 10_000_000, [0; 32], + [0; 32], ); assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..], @@ -7441,7 +7575,7 @@ mod tests { let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); let mut config = UserConfig::default(); config.channel_handshake_config.announced_channel = false; - let mut chan = Channel::::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 100000, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test + let mut chan = Channel::::new_outbound(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test chan.holder_dust_limit_satoshis = 546; chan.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel @@ -7584,6 +7718,11 @@ mod tests { } } } + // anchors: simple commitment tx with no HTLCs and single anchor + test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658", + "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {}); + // simple commitment tx with no HTLCs chan.value_to_self_msat = 7000000000; @@ -8034,7 +8173,7 @@ mod tests { chan.pending_outbound_htlcs.push({ let mut out = OutboundHTLCOutput{ htlc_id: 6, - amount_msat: 5000000, + amount_msat: 5000001, cltv_expiry: 506, payment_hash: PaymentHash([0; 32]), state: OutboundHTLCState::Committed, @@ -8056,40 
+8195,40 @@ mod tests { out }); - test_commitment!("30440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df9", - "3045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f", - "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a79f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f014730440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8", + "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { { 0, - "304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c6", - "304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c", - 
"0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc34000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050048304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c60148304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" }, + "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5", + "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b", + "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" }, { 1, - "304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5", - "30450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d39", - "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3401000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5014830450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d3901008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" }, + "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a", + "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d", + 
"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" }, { 2, - "30440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f", - "30440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e511", - "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3402000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f014730440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e51101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" } + "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc", + "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb", + "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" } } ); - test_commitment_with_anchors!("3045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a68948", - "3045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e45", - 
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aae9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c37ac4fc8538677631230c4b286f36b6f54c51fb4b34ef0bd0ba219ba47452630220278e09a745454ea380f3694392ed113762c68dd209b48360f547541088be9e4501483045022100c592f6b80d35b4f5d1e3bc9788f51141a0065be6013bad53a1977f7c444651660220278ac06ead9016bfb8dc476f186eabace2b02793b2f308442f5b0d5f24a6894801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { + test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725", + "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76", + "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", { { 0, - "3045022100de8a0649d54fd2e4fc04502c77df9b65da839bbd01854f818f129338b99564b2022009528dbb12c00e874cb2149b1dccc600c69ea5e4042ebf584984fcb029c2d1ec", - "304402203e7c2622fa3ca29355d37a0ea991bfd7cdb54e6122a1d98d3229d092131f55cd022055263f7f8f32f4cd2f86da63ca106bd7badf0b19ee9833d80cd3b9216eeafd74", - 
"02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe2902000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100de8a0649d54fd2e4fc04502c77df9b65da839bbd01854f818f129338b99564b2022009528dbb12c00e874cb2149b1dccc600c69ea5e4042ebf584984fcb029c2d1ec8347304402203e7c2622fa3ca29355d37a0ea991bfd7cdb54e6122a1d98d3229d092131f55cd022055263f7f8f32f4cd2f86da63ca106bd7badf0b19ee9833d80cd3b9216eeafd74012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" }, + "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2", + "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424", + "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" }, { 1, - "3045022100de6eee8474376ea316d007b33103b4543a46bdf6fda5cbd5902b28a5bc14584f022002989e7b4f7813b77acbe4babcf96d7ffbbe0bf14cba24672364f8e591479edb", - "3045022100c10688346a9d84647bde7027da07f0d79c6d4129307e4c6c9aea7bdbf25ac3350220269104209793c32c47491698c4e46ebea9c3293a1e4403f9abda39f79698f6b5", - "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe290300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100de6eee8474376ea316d007b33103b4543a46bdf6fda5cbd5902b28a5bc14584f022002989e7b4f7813b77acbe4babcf96d7ffbbe0bf14cba24672364f8e591479edb83483045022100c10688346a9d84647bde7027da07f0d79c6d4129307e4c6c9aea7bdbf25ac3350220269104209793c32c47491698c4e46ebea9c3293a1e4403f9abda39f79698f6b501008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" }, + "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c", + "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b", + 
"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" }, { 2, - "3045022100fe87da8124ceecbcabb9d599c5339f40277c7c7406514fafbccbf180c7c09cf40220429c7fb6d0fd3705e931ab1219ab0432af38ae4d676008cc1964fbeb8cd35d2e", - "3044022040ac769a851da31d8e4863e5f94719204f716c82a1ce6d6c52193d9a33b84bce022035df97b078ce80f20dca2109e4c6075af0b50148811452e7290e68b2680fced4", - "02000000000101aa443fb63abc1e8c754f98a7b96c27cb02b21d891d1242a16b630dc32c2afe290400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fe87da8124ceecbcabb9d599c5339f40277c7c7406514fafbccbf180c7c09cf40220429c7fb6d0fd3705e931ab1219ab0432af38ae4d676008cc1964fbeb8cd35d2e83473044022040ac769a851da31d8e4863e5f94719204f716c82a1ce6d6c52193d9a33b84bce022035df97b078ce80f20dca2109e4c6075af0b50148811452e7290e68b2680fced401008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" } + "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1", + "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b", + "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" } } ); }