X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=4ed5b63f213f70d80305d5951386875eb129197c;hb=dd15ab03944cfb6e71dd11b806ace1b4fa225bef;hp=e553e8e534055ae9cf3a8d64e0acc83aa04e30a6;hpb=0e83e91d7ae6e402ca24164e920cf12b486cd17c;p=rust-lightning diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index e553e8e5..4ed5b63f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -64,7 +64,7 @@ use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, Maybe use crate::util::logger::{Level, Logger}; use crate::util::errors::APIError; -use alloc::collections::BTreeMap; +use alloc::collections::{btree_map, BTreeMap}; use crate::io; use crate::prelude::*; @@ -803,7 +803,7 @@ pub type SimpleArcChannelManager = ChannelManager< Arc>>, Arc, - Arc>>, Arc>>>, + Arc>>, Arc>>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer>>, Arc>, >>, @@ -832,7 +832,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = &'e DefaultRouter< &'f NetworkGraph<&'g L>, &'g L, - &'h Mutex, &'g L>>, + &'h RwLock, &'g L>>, ProbabilisticScoringFeeParameters, ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L> >, @@ -840,6 +840,9 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = >; /// A trivial trait which describes any [`ChannelManager`]. +/// +/// This is not exported to bindings users as general cover traits aren't useful in other +/// languages. pub trait AChannelManager { /// A type implementing [`chain::Watch`]. type Watch: chain::Watch + ?Sized; @@ -1201,6 +1204,12 @@ where /// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the /// Notifier the lock contains sends out a notification when the lock is released. total_consistency_lock: RwLock<()>, + /// Tracks the progress of channels going through batch funding by whether funding_signed was + /// received and the monitor has been persisted. + /// + /// This information does not need to be persisted as funding nodes can forget + /// unfunded channels upon disconnection. + funding_batch_states: Mutex>>, background_events_processed_since_startup: AtomicBool, @@ -1442,12 +1451,6 @@ pub struct ChannelCounterparty { } /// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`] -/// -/// Balances of a channel are available through [`ChainMonitor::get_claimable_balances`] and -/// [`ChannelMonitor::get_claimable_balances`], calculated with respect to the corresponding on-chain -/// transactions. -/// -/// [`ChainMonitor::get_claimable_balances`]: crate::chain::chainmonitor::ChainMonitor::get_claimable_balances #[derive(Clone, Debug, PartialEq)] pub struct ChannelDetails { /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes, @@ -1529,11 +1532,24 @@ pub struct ChannelDetails { /// /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115. pub feerate_sat_per_1000_weight: Option, + /// Our total balance. This is the amount we would get if we close the channel. + /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this + /// amount is not likely to be recoverable on close. + /// + /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose + /// balance is not available for inclusion in new outbound HTLCs). 
This further does not include + /// any pending outgoing HTLCs which are awaiting some other resolution to be sent. + /// This does not consider any on-chain fees. + /// + /// See also [`ChannelDetails::outbound_capacity_msat`] + pub balance_msat: u64, /// The available outbound capacity for sending HTLCs to the remote peer. This does not include /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not /// available for inclusion in new outbound HTLCs). This further does not include any pending /// outgoing HTLCs which are awaiting some other resolution to be sent. /// + /// See also [`ChannelDetails::balance_msat`] + /// /// This value is not exact. Due to various in-flight changes, feerate changes, and our /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we /// should be able to spend nearly this amount. @@ -1543,8 +1559,8 @@ pub struct ChannelDetails { /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us /// to use a limit as close as possible to the HTLC limit we can currently send. /// - /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`] and - /// [`ChannelDetails::outbound_capacity_msat`]. + /// See also [`ChannelDetails::next_outbound_htlc_minimum_msat`], + /// [`ChannelDetails::balance_msat`], and [`ChannelDetails::outbound_capacity_msat`]. pub next_outbound_htlc_limit_msat: u64, /// The minimum value for sending a single HTLC to the remote peer. This is the equivalent of /// [`ChannelDetails::next_outbound_htlc_limit_msat`] but represents a lower-bound, rather than @@ -1674,6 +1690,7 @@ impl ChannelDetails { channel_value_satoshis: context.get_value_satoshis(), feerate_sat_per_1000_weight: Some(context.get_feerate_sat_per_1000_weight()), unspendable_punishment_reserve: to_self_reserve_satoshis, + balance_msat: balance.balance_msat, inbound_capacity_msat: balance.inbound_capacity_msat, outbound_capacity_msat: balance.outbound_capacity_msat, next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat, @@ -1788,7 +1805,7 @@ macro_rules! handle_error { let mut msg_events = Vec::with_capacity(2); if let Some((shutdown_res, update_option)) = shutdown_finish { - $self.finish_force_close_channel(shutdown_res); + $self.finish_close_channel(shutdown_res); if let Some(update) = update_option { msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update @@ -2025,9 +2042,54 @@ macro_rules! handle_monitor_update_completion { } let channel_id = $chan.context.channel_id(); + let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid(); core::mem::drop($peer_state_lock); core::mem::drop($per_peer_state_lock); + // If the channel belongs to a batch funding transaction, the progress of the batch + // should be updated as we have received funding_signed and persisted the monitor. 
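+ // (Illustrative annotation, not part of the upstream patch.) The batch state consulted
+ // below is assumed to have the shape `Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>`:
+ // one entry per batch funding txid, where each per-channel `bool` records whether that
+ // channel has completed its initial monitor update. The batch is considered complete
+ // once every flag is true.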
+ if let Some(txid) = unbroadcasted_batch_funding_txid { + let mut funding_batch_states = $self.funding_batch_states.lock().unwrap(); + let mut batch_completed = false; + if let Some(batch_state) = funding_batch_states.get_mut(&txid) { + let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| ( + *chan_id == channel_id && + *pubkey == counterparty_node_id + )); + if let Some(channel_state) = channel_state { + channel_state.2 = true; + } else { + debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update"); + } + batch_completed = batch_state.iter().all(|(_, _, completed)| *completed); + } else { + debug_assert!(false, "Missing batch state for channel which completed initial monitor update"); + } + + // When all channels in a batched funding transaction have become ready, it is not necessary + // to track the progress of the batch anymore and the state of the channels can be updated. + if batch_completed { + let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten(); + let per_peer_state = $self.per_peer_state.read().unwrap(); + let mut batch_funding_tx = None; + for (channel_id, counterparty_node_id, _) in removed_batch_state { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { + batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding()); + chan.set_batch_ready(); + let mut pending_events = $self.pending_events.lock().unwrap(); + emit_channel_pending_event!(pending_events, chan); + } + } + } + if let Some(tx) = batch_funding_tx { + log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.txid()); + $self.tx_broadcaster.broadcast_transactions(&[&tx]); + } + } + } + $self.handle_monitor_update_completion_actions(update_actions); if let Some(forwards) = htlc_forwards { @@ -2230,9 +2292,9 @@ where pending_background_events: Mutex::new(Vec::new()), total_consistency_lock: RwLock::new(()), background_events_processed_since_startup: AtomicBool::new(false), - event_persist_notifier: Notifier::new(), needs_persist_flag: AtomicBool::new(false), + funding_batch_states: Mutex::new(BTreeMap::new()), entropy_source, node_signer, @@ -2497,6 +2559,7 @@ where let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; + let mut shutdown_result = None; loop { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -2511,6 +2574,7 @@ where if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let funding_txo_opt = chan.context.get_funding_txo(); let their_features = &peer_state.latest_features; + let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid(); let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; failed_htlcs = htlcs; @@ -2541,6 +2605,7 @@ where }); } self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed); + shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid)); } } break; @@ -2551,6 +2616,8 @@ where // it does not exist for this peer. Either way, we can attempt to force-close it. // // An appropriate error will be returned for non-existence of the channel if that's the case. 
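+ // (Illustrative annotation, not part of the upstream patch.) Dropping both locks below is
+ // assumed to be necessary because `force_close_channel_with_peer` re-acquires
+ // `per_peer_state`, and its close path (`finish_close_channel`) debug-asserts that neither
+ // `per_peer_state` nor any per-peer lock is held by the calling thread, so holding
+ // `peer_state_lock` or `per_peer_state` across the call could deadlock or trip those
+ // assertions.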
+ mem::drop(peer_state_lock); + mem::drop(per_peer_state); return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ()) }, } @@ -2562,6 +2629,10 @@ where self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } + if let Some(shutdown_result) = shutdown_result { + self.finish_close_channel(shutdown_result); + } + Ok(()) } @@ -2569,11 +2640,11 @@ where /// will be accepted on the given channel, and after additional timeout/the closing of all /// pending HTLCs, the channel will be closed on chain. /// - /// * If we are the channel initiator, we will pay between our [`Background`] and - /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee - /// estimate. + /// * If we are the channel initiator, we will pay between our [`ChannelCloseMinimum`] and + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`] + /// fee estimate. /// * If our counterparty is the channel initiator, we will require a channel closing - /// transaction feerate of at least our [`Background`] feerate or the feerate which + /// transaction feerate of at least our [`ChannelCloseMinimum`] feerate or the feerate which /// would appear on a force-closure transaction, whichever is lower. We will allow our /// counterparty to pay as much fee as they'd like, however. /// @@ -2585,8 +2656,8 @@ where /// channel. /// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis - /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background - /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`ChannelCloseMinimum`]: crate::chain::chaininterface::ConfirmationTarget::ChannelCloseMinimum + /// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, None, None) @@ -2600,8 +2671,8 @@ where /// the channel being closed or not: /// * If we are the channel initiator, we will pay at least this feerate on the closing /// transaction. The upper-bound is set by - /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee - /// estimate (or `target_feerate_sat_per_1000_weight`, if it is greater). + /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`] + /// fee estimate (or `target_feerate_sat_per_1000_weight`, if it is greater). /// * If our counterparty is the channel initiator, we will refuse to accept a channel closure /// transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which /// will appear on a force-closure transaction, whichever is lower). @@ -2619,16 +2690,20 @@ where /// channel. 
/// /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis - /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background - /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal + /// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, shutdown_script: Option) -> Result<(), APIError> { self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script) } - #[inline] - fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) { - let (monitor_update_option, mut failed_htlcs) = shutdown_res; + fn finish_close_channel(&self, shutdown_res: ShutdownResult) { + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + #[cfg(debug_assertions)] + for (_, peer) in self.per_peer_state.read().unwrap().iter() { + debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); + } + + let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res; log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; @@ -2643,6 +2718,31 @@ where // ignore the result here. let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update); } + let mut shutdown_results = Vec::new(); + if let Some(txid) = unbroadcasted_batch_funding_txid { + let mut funding_batch_states = self.funding_batch_states.lock().unwrap(); + let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten(); + let per_peer_state = self.per_peer_state.read().unwrap(); + let mut has_uncompleted_channel = None; + for (channel_id, counterparty_node_id, state) in affected_channels { + if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { + let mut peer_state = peer_state_mutex.lock().unwrap(); + if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { + update_maps_on_chan_removal!(self, &chan.context()); + self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure); + shutdown_results.push(chan.context_mut().force_shutdown(false)); + } + } + has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); + } + debug_assert!( + has_uncompleted_channel.unwrap_or(true), + "Closing a batch where all channels have completed initial monitor update", + ); + } + for shutdown_result in shutdown_results.drain(..) 
{ + self.finish_close_channel(shutdown_result); + } } /// `peer_msg` should be set when we receive a message from a peer, but not set when the @@ -2653,8 +2753,7 @@ where let peer_state_mutex = per_peer_state.get(peer_node_id) .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?; let (update_opt, counterparty_node_id) = { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state = &mut *peer_state_lock; + let mut peer_state = peer_state_mutex.lock().unwrap(); let closure_reason = if let Some(peer_msg) = peer_msg { ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) } } else { @@ -2664,13 +2763,15 @@ where log_error!(self.logger, "Force-closing channel {}", channel_id); self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason); let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); + mem::drop(peer_state); + mem::drop(per_peer_state); match chan_phase { ChannelPhase::Funded(mut chan) => { - self.finish_force_close_channel(chan.context.force_shutdown(broadcast)); + self.finish_close_channel(chan.context.force_shutdown(broadcast)); (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id()) }, ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => { - self.finish_force_close_channel(chan_phase.context_mut().force_shutdown(false)); + self.finish_close_channel(chan_phase.context_mut().force_shutdown(false)); // Unfunded channel has no update (None, chan_phase.context().get_counterparty_node_id()) }, @@ -2686,10 +2787,17 @@ where } }; if let Some(update) = update_opt { - let mut peer_state = peer_state_mutex.lock().unwrap(); - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); + // Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if + // not try to broadcast it via whatever peer we have. + let per_peer_state = self.per_peer_state.read().unwrap(); + let a_peer_state_opt = per_peer_state.get(peer_node_id) + .ok_or(per_peer_state.values().next()); + if let Ok(a_peer_state_mutex) = a_peer_state_opt { + let mut a_peer_state = a_peer_state_mutex.lock().unwrap(); + a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } } Ok(counterparty_node_id) @@ -2831,7 +2939,7 @@ where // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a // channel closure (see HTLC_FAIL_BACK_BUFFER rationale). let current_height: u32 = self.best_block.read().unwrap().height(); - if (outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 { + if cltv_expiry <= current_height + HTLC_FAIL_BACK_BUFFER + 1 { let mut err_data = Vec::with_capacity(12); err_data.extend_from_slice(&amt_msat.to_be_bytes()); err_data.extend_from_slice(¤t_height.to_be_bytes()); @@ -3375,9 +3483,8 @@ where /// In general, a path may raise: /// * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee, /// node public key) is specified. - /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates - /// (including due to previous monitor update failure or new permanent monitor update - /// failure). 
+ /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available as it has been + /// closed, doesn't exist, or the peer is currently disconnected. /// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the /// relevant updates. /// @@ -3446,19 +3553,10 @@ where /// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to /// determine the ultimate status of a payment. /// - /// # Requested Invoices - /// - /// In the case of paying a [`Bolt12Invoice`], abandoning the payment prior to receiving the - /// invoice will result in an [`Event::InvoiceRequestFailed`] and prevent any attempts at paying - /// it once received. The other events may only be generated once the invoice has been received. - /// /// # Restart Behavior /// /// If an [`Event::PaymentFailed`] is generated and we restart without first persisting the - /// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated; likewise for - /// [`Event::InvoiceRequestFailed`]. - /// - /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice + /// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated. pub fn abandon_payment(&self, payment_id: PaymentId) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events); @@ -3524,13 +3622,13 @@ where /// /// See [`ChannelManager::send_preflight_probes`] for more information. pub fn send_spontaneous_preflight_probes( - &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32, + &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32, liquidity_limit_multiplier: Option, ) -> Result, ProbeSendFailure> { let payment_params = PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta); - let route_params = RouteParameters { payment_params, final_value_msat: amount_msat }; + let route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat); self.send_preflight_probes(route_params, liquidity_limit_multiplier) } @@ -3631,8 +3729,9 @@ where /// Handles the generation of a funding transaction, optionally (for tests) with a function /// which checks the correctness of the funding transaction given the associated channel. 
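// (Illustrative annotation, not part of the upstream patch.) In the signature change below,
// `find_funding_output` is assumed to become a mutable closure (note the new
// `mut find_funding_output` binding): batched funding invokes this helper once per channel,
// and the closure mutates captured batch state by pushing each resulting funding outpoint
// into `funding_batch_state`. The new `is_batch_funding` flag is threaded through to
// `get_funding_created`.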
- fn funding_transaction_generated_intern, &Transaction) -> Result>( - &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput + fn funding_transaction_generated_intern, &Transaction) -> Result>( + &self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, is_batch_funding: bool, + mut find_funding_output: FundingOutput, ) -> Result<(), APIError> { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -3644,7 +3743,7 @@ where Some(ChannelPhase::UnfundedOutboundV1(chan)) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; - let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger) + let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &self.logger) .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { let channel_id = chan.context.channel_id(); let user_id = chan.context.get_user_id(); @@ -3700,7 +3799,7 @@ where #[cfg(test)] pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> { - self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| { + self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_, tx| { Ok(OutPoint { txid: tx.txid(), index: output_index }) }) } @@ -3736,17 +3835,37 @@ where /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed pub fn funding_transaction_generated(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> { + self.batch_funding_transaction_generated(&[(temporary_channel_id, counterparty_node_id)], funding_transaction) + } + + /// Call this upon creation of a batch funding transaction for the given channels. + /// + /// Return values are identical to [`Self::funding_transaction_generated`], respective to + /// each individual channel and transaction output. + /// + /// Do NOT broadcast the funding transaction yourself. This batch funding transcaction + /// will only be broadcast when we have safely received and persisted the counterparty's + /// signature for each channel. + /// + /// If there is an error, all channels in the batch are to be considered closed. 
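// (Illustrative annotation, not part of the upstream patch.) Note the design of the function
// below: validation failures are accumulated into `result` via `result.and(...)` /
// `result.and_then(...)` rather than returned early, so that the error path at the end can
// force-shutdown and clean up every channel in the batch, matching the "all channels in the
// batch are to be considered closed" guarantee documented above.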
+ pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let mut result = Ok(()); if !funding_transaction.is_coin_base() { for inp in funding_transaction.input.iter() { if inp.witness.is_empty() { - return Err(APIError::APIMisuseError { + result = result.and(Err(APIError::APIMisuseError { err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned() - }); + })); } } } + if funding_transaction.output.len() > u16::max_value() as usize { + result = result.and(Err(APIError::APIMisuseError { + err: "Transaction had more than 2^16 outputs, which is not supported".to_owned() + })); + } { let height = self.best_block.read().unwrap().height(); // Transactions are evaluated as final by network mempools if their locktime is strictly @@ -3754,37 +3873,93 @@ where // node might not have perfect sync about their blockchain views. Thus, if the wallet // module is ahead of LDK, only allow one more block of headroom. if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 1 { - return Err(APIError::APIMisuseError { + result = result.and(Err(APIError::APIMisuseError { err: "Funding transaction absolute timelock is non-final".to_owned() - }); + })); } } - self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| { - if tx.output.len() > u16::max_value() as usize { - return Err(APIError::APIMisuseError { - err: "Transaction had more than 2^16 outputs, which is not supported".to_owned() - }); - } - let mut output_index = None; - let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh(); - for (idx, outp) in tx.output.iter().enumerate() { - if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { - if output_index.is_some() { + let txid = funding_transaction.txid(); + let is_batch_funding = temporary_channels.len() > 1; + let mut funding_batch_states = if is_batch_funding { + Some(self.funding_batch_states.lock().unwrap()) + } else { + None + }; + let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| { + match states.entry(txid) { + btree_map::Entry::Occupied(_) => { + result = result.clone().and(Err(APIError::APIMisuseError { + err: "Batch funding transaction with the same txid already exists".to_owned() + })); + None + }, + btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())), + } + }); + for &(temporary_channel_id, counterparty_node_id) in temporary_channels.iter() { + result = result.and_then(|_| self.funding_transaction_generated_intern( + temporary_channel_id, + counterparty_node_id, + funding_transaction.clone(), + is_batch_funding, + |chan, tx| { + let mut output_index = None; + let expected_spk = chan.context.get_funding_redeemscript().to_v0_p2wsh(); + for (idx, outp) in tx.output.iter().enumerate() { + if outp.script_pubkey == expected_spk && outp.value == chan.context.get_value_satoshis() { + if output_index.is_some() { + return Err(APIError::APIMisuseError { + err: "Multiple outputs matched the expected script and value".to_owned() + }); + } + output_index = Some(idx as u16); + } + } + if output_index.is_none() { return Err(APIError::APIMisuseError { - err: "Multiple outputs matched the expected script and 
value".to_owned() + err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned() }); } - output_index = Some(idx as u16); + let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() }; + if let Some(funding_batch_state) = funding_batch_state.as_mut() { + funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false)); + } + Ok(outpoint) + }) + ); + } + if let Err(ref e) = result { + // Remaining channels need to be removed on any error. + let e = format!("Error in transaction funding: {:?}", e); + let mut channels_to_remove = Vec::new(); + channels_to_remove.extend(funding_batch_states.as_mut() + .and_then(|states| states.remove(&txid)) + .into_iter().flatten() + .map(|(chan_id, node_id, _state)| (chan_id, node_id)) + ); + channels_to_remove.extend(temporary_channels.iter() + .map(|(&chan_id, &node_id)| (chan_id, node_id)) + ); + let mut shutdown_results = Vec::new(); + { + let per_peer_state = self.per_peer_state.read().unwrap(); + for (channel_id, counterparty_node_id) in channels_to_remove { + per_peer_state.get(&counterparty_node_id) + .map(|peer_state_mutex| peer_state_mutex.lock().unwrap()) + .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id)) + .map(|mut chan| { + update_maps_on_chan_removal!(self, &chan.context()); + self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() }); + shutdown_results.push(chan.context_mut().force_shutdown(false)); + }); } } - if output_index.is_none() { - return Err(APIError::APIMisuseError { - err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned() - }); + for shutdown_result in shutdown_results.drain(..) { + self.finish_close_channel(shutdown_result); } - Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() }) - }) + } + result } /// Atomically applies partial updates to the [`ChannelConfig`] of the given channels. 
@@ -3827,7 +4002,7 @@ where for channel_id in channel_ids { if !peer_state.has_channel(channel_id) { return Err(APIError::ChannelUnavailable { - err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", channel_id, counterparty_node_id), + err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id), }); }; } @@ -3938,7 +4113,7 @@ where next_hop_channel_id, next_node_id) }), None => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}.", + err: format!("Channel with id {} not found for the passed counterparty node_id {}", next_hop_channel_id, next_node_id) }) } @@ -4551,8 +4726,10 @@ where if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; } // If the feerate has decreased by less than half, don't bother if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() { - log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", + if new_feerate != chan.context.get_feerate_sat_per_1000_weight() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate); + } return NotifyOption::SkipPersistNoEvents; } if !chan.context.is_live() { @@ -4576,8 +4753,8 @@ where PersistenceNotifierGuard::optionally_notify(self, || { let mut should_persist = NotifyOption::SkipPersistNoEvents; - let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); + let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); + let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee); let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { @@ -4587,9 +4764,9 @@ where |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None } ) { let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - min_mempool_feerate + anchor_feerate } else { - normal_feerate + non_anchor_feerate }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -4621,14 +4798,15 @@ where PersistenceNotifierGuard::optionally_notify(self, || { let mut should_persist = NotifyOption::SkipPersistNoEvents; - let normal_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal); - let min_mempool_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MempoolMinimum); + let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee); + let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee); let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new(); let mut timed_out_mpp_htlcs = Vec::new(); let mut pending_peers_awaiting_removal = Vec::new(); + let mut shutdown_channels = Vec::new(); - let process_unfunded_channel_tick = | + let mut process_unfunded_channel_tick = | chan_id: &ChannelId, context: &mut ChannelContext, unfunded_context: &mut 
UnfundedChannelContext, @@ -4641,7 +4819,7 @@ where "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id); update_maps_on_chan_removal!(self, &context); self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed); - self.finish_force_close_channel(context.force_shutdown(false)); + shutdown_channels.push(context.force_shutdown(false)); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::SendErrorMessage { @@ -4668,9 +4846,9 @@ where match phase { ChannelPhase::Funded(chan) => { let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() { - min_mempool_feerate + anchor_feerate } else { - normal_feerate + non_anchor_feerate }; let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate); if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } @@ -4834,6 +5012,10 @@ where let _ = handle_error!(self, err, counterparty_node_id); } + for shutdown_res in shutdown_channels { + self.finish_close_channel(shutdown_res); + } + self.pending_outbound_payments.remove_stale_payments(&self.pending_events); // Technically we don't need to do this here, but if we have holding cell entries in a @@ -4990,6 +5172,7 @@ where // This ensures that future code doesn't introduce a lock-order requirement for // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling // this function with any `per_peer_state` peer lock acquired would. + #[cfg(debug_assertions)] for (_, peer) in self.per_peer_state.read().unwrap().iter() { debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); } @@ -5997,7 +6180,8 @@ where } fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { - let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>; + let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new(); + let mut finish_shutdown = None; { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -6042,8 +6226,7 @@ where log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); let mut chan = remove_channel_phase!(self, chan_phase_entry); - self.finish_force_close_channel(chan.context_mut().force_shutdown(false)); - return Ok(()); + finish_shutdown = Some(chan.context_mut().force_shutdown(false)); }, } } else { @@ -6055,11 +6238,16 @@ where let reason = HTLCFailReason::from_failure_code(0x4000 | 8); self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver); } + if let Some(shutdown_res) = finish_shutdown { + self.finish_close_channel(shutdown_res); + } Ok(()) } fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> { + let mut shutdown_result = None; + let unbroadcasted_batch_funding_txid; let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) .ok_or_else(|| { @@ -6072,6 +6260,7 @@ where match peer_state.channel_by_id.entry(msg.channel_id.clone()) { hash_map::Entry::Occupied(mut chan_phase_entry) => { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { + unbroadcasted_batch_funding_txid = 
chan.context.unbroadcasted_batch_funding_txid(); let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry); if let Some(msg) = closing_signed { peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned { @@ -6108,6 +6297,11 @@ where }); } self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); + shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid)); + } + mem::drop(per_peer_state); + if let Some(shutdown_result) = shutdown_result { + self.finish_close_channel(shutdown_result); } Ok(()) } @@ -6566,8 +6760,13 @@ where if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersistNoEvents); } else { - log_debug!(self.logger, "Received channel_update for channel {}.", chan_id); - try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry); + log_debug!(self.logger, "Received channel_update {:?} for channel {}.", msg, chan_id); + let did_change = try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry); + // If nothing changed after applying their update, we don't need to bother + // persisting. + if !did_change { + return Ok(NotifyOption::SkipPersistNoEvents); + } } } else { return try_chan_phase_entry!(self, Err(ChannelError::Close( @@ -6712,7 +6911,7 @@ where } for failure in failed_channels.drain(..) { - self.finish_force_close_channel(failure); + self.finish_close_channel(failure); } has_pending_monitor_events @@ -6782,6 +6981,7 @@ where fn maybe_generate_initial_closing_signed(&self) -> bool { let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new(); let mut has_update = false; + let mut shutdown_results = Vec::new(); { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -6792,6 +6992,7 @@ where peer_state.channel_by_id.retain(|channel_id, phase| { match phase { ChannelPhase::Funded(chan) => { + let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid(); match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) { Ok((msg_opt, tx_opt)) => { if let Some(msg) = msg_opt { @@ -6814,6 +7015,7 @@ where log_info!(self.logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); update_maps_on_chan_removal!(self, &chan.context); + shutdown_results.push((None, Vec::new(), unbroadcasted_batch_funding_txid)); false } else { true } }, @@ -6835,6 +7037,10 @@ where let _ = handle_error!(self, err, counterparty_node_id); } + for shutdown_result in shutdown_results.drain(..) { + self.finish_close_channel(shutdown_result); + } + has_update } @@ -6860,7 +7066,7 @@ where counterparty_node_id, funding_txo, update }); } - self.finish_force_close_channel(failure); + self.finish_close_channel(failure); } } @@ -7800,7 +8006,6 @@ where fn peer_disconnected(&self, counterparty_node_id: &PublicKey) { let _persistence_guard = PersistenceNotifierGuard::optionally_notify( self, || NotifyOption::SkipPersistHandleEvents); - let mut failed_channels = Vec::new(); let mut per_peer_state = self.per_peer_state.write().unwrap(); let remove_peer = { @@ -7813,24 +8018,24 @@ where peer_state.channel_by_id.retain(|_, phase| { let context = match phase { ChannelPhase::Funded(chan) => { - chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger); - // We only retain funded channels that are not shutdown. - if !chan.is_shutdown() { + if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() { + // We only retain funded channels that are not shutdown. 
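+ // (Illustrative annotation, not part of the upstream patch.)
+ // `remove_uncommitted_htlcs_and_mark_paused` is assumed to now return a `Result`, where
+ // `Err` means the channel cannot be resumed after reconnection (for example, its funding
+ // was never broadcast); such channels fall through to the removal and force-shutdown path
+ // below instead of being retained.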
return true; } - &chan.context + &mut chan.context }, // Unfunded channels will always be removed. ChannelPhase::UnfundedOutboundV1(chan) => { - &chan.context + &mut chan.context }, ChannelPhase::UnfundedInboundV1(chan) => { - &chan.context + &mut chan.context }, }; // Clean up for removal. update_maps_on_chan_removal!(self, &context); self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer); + failed_channels.push(context.force_shutdown(false)); false }); // Note that we don't bother generating any events for pre-accept channels - @@ -7889,7 +8094,7 @@ where mem::drop(per_peer_state); for failure in failed_channels.drain(..) { - self.finish_force_close_channel(failure); + self.finish_close_channel(failure); } } @@ -8218,7 +8423,7 @@ impl Writeable for ChannelDetails { (10, self.channel_value_satoshis, required), (12, self.unspendable_punishment_reserve, option), (14, user_channel_id_low, required), - (16, self.next_outbound_htlc_limit_msat, required), // Forwards compatibility for removed balance_msat field. + (16, self.balance_msat, required), (18, self.outbound_capacity_msat, required), (19, self.next_outbound_htlc_limit_msat, required), (20, self.inbound_capacity_msat, required), @@ -8254,7 +8459,7 @@ impl Readable for ChannelDetails { (10, channel_value_satoshis, required), (12, unspendable_punishment_reserve, option), (14, user_channel_id_low, required), - (16, _balance_msat, option), // Backwards compatibility for removed balance_msat field. + (16, balance_msat, required), (18, outbound_capacity_msat, required), // Note that by the time we get past the required read above, outbound_capacity_msat will be // filled in, so we can safely unwrap it here. @@ -8280,8 +8485,6 @@ impl Readable for ChannelDetails { let user_channel_id = user_channel_id_low as u128 + ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64); - let _balance_msat: Option = _balance_msat; - Ok(Self { inbound_scid_alias, channel_id: channel_id.0.unwrap(), @@ -8294,6 +8497,7 @@ impl Readable for ChannelDetails { channel_value_satoshis: channel_value_satoshis.0.unwrap(), unspendable_punishment_reserve, user_channel_id, + balance_msat: balance_msat.0.unwrap(), outbound_capacity_msat: outbound_capacity_msat.0.unwrap(), next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(), next_outbound_htlc_minimum_msat: next_outbound_htlc_minimum_msat.0.unwrap(), @@ -8634,7 +8838,7 @@ where } number_of_funded_channels += peer_state.channel_by_id.iter().filter( - |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_initiated() } else { false } + |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false } ).count(); } @@ -8645,7 +8849,7 @@ where let peer_state = &mut *peer_state_lock; for channel in peer_state.channel_by_id.iter().filter_map( |(_, phase)| if let ChannelPhase::Funded(channel) = phase { - if channel.context.is_funding_initiated() { Some(channel) } else { None } + if channel.context.is_funding_broadcast() { Some(channel) } else { None } } else { None } ) { channel.write(writer)?; @@ -9069,7 +9273,10 @@ where log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); } - let (monitor_update, mut new_failed_htlcs) = 
channel.context.force_shutdown(true); + let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true); + if batch_funding_txid.is_some() { + return Err(DecodeError::InvalidValue); + } if let Some((counterparty_node_id, funding_txo, update)) = monitor_update { close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update @@ -9109,7 +9316,7 @@ where if let Some(short_channel_id) = channel.context.get_short_channel_id() { short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id())); } - if channel.context.is_funding_initiated() { + if channel.context.is_funding_broadcast() { id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id()); } match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) { @@ -9474,6 +9681,7 @@ where pending_fee_msat: Some(path_fee), total_msat: path_amt, starting_block_height: best_block_height, + remaining_max_total_routing_fee_msat: None, // only used for retries, and we'll never retry on startup }); log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}", path_amt, &htlc.payment_hash, log_bytes!(session_priv_bytes)); @@ -9822,6 +10030,8 @@ where event_persist_notifier: Notifier::new(), needs_persist_flag: AtomicBool::new(false), + funding_batch_states: Mutex::new(BTreeMap::new()), + entropy_source: args.entropy_source, node_signer: args.node_signer, signer_provider: args.signer_provider, @@ -10541,6 +10751,16 @@ mod tests { check_api_error_message(expected_message, res_err) } + fn check_channel_unavailable_error(res_err: Result, expected_channel_id: ChannelId, peer_node_id: PublicKey) { + let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id); + check_api_error_message(expected_message, res_err) + } + + fn check_api_misuse_error(res_err: Result) { + let expected_message = "No such channel awaiting to be accepted.".to_string(); + check_api_error_message(expected_message, res_err) + } + fn check_api_error_message(expected_err_message: String, res_err: Result) { match res_err { Err(APIError::APIMisuseError { err }) => { @@ -10585,6 +10805,36 @@ mod tests { check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key); } + #[test] + fn test_api_calls_with_unavailable_channel() { + // Tests that our API functions that expects a `counterparty_node_id` and a `channel_id` + // as input, behaves as expected if the `counterparty_node_id` is a known peer in the + // `ChannelManager::per_peer_state` map, but the peer state doesn't contain a channel with + // the given `channel_id`. + let chanmon_cfg = create_chanmon_cfgs(2); + let node_cfg = create_node_cfgs(2, &chanmon_cfg); + let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]); + let nodes = create_network(2, &node_cfg, &node_chanmgr); + + let counterparty_node_id = nodes[1].node.get_our_node_id(); + + // Dummy values + let channel_id = ChannelId::from_bytes([4; 32]); + + // Test the API functions. 
+ check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42)); + + check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); + + check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); + + check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id); + + check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id); + + check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id); + } + #[test] fn test_connection_limiting() { // Test that we limit un-channel'd peers and un-funded channels properly. @@ -10845,6 +11095,30 @@ mod tests { sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat)).is_ok()); } + #[test] + fn test_final_incorrect_cltv(){ + let chanmon_cfg = create_chanmon_cfgs(1); + let node_cfg = create_node_cfgs(1, &chanmon_cfg); + let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]); + let node = create_network(1, &node_cfg, &node_chanmgr); + + let result = node[0].node.construct_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive { + amt_msat: 100, + outgoing_cltv_value: 22, + payment_metadata: None, + keysend_preimage: None, + payment_data: Some(msgs::FinalOnionHopData { + payment_secret: PaymentSecret([0; 32]), total_msat: 100, + }), + custom_tlvs: Vec::new(), + }, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None); + + // Should not return an error as this condition: + // https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334 + // is not satisfied. + assert!(result.is_ok()); + } + #[test] fn test_inbound_anchors_manual_acceptance() { // Tests that we properly limit inbound channels when we have the manual-channel-acceptance