Merge pull request #2213 from benthecarman/error-sign-provider-addrs
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 966b9170d56d1f3fac7ceb48531f81c49593abb1..55c796967d67e2548dda91944d32503a38336875 100644
@@ -72,7 +72,7 @@ use core::{cmp, mem};
 use core::cell::RefCell;
 use crate::io::Read;
 use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
 
@@ -934,6 +934,8 @@ where
 
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_events: Mutex<Vec<events::Event>>,
+       /// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
+       pending_events_processor: AtomicBool,
        /// See `ChannelManager` struct-level documentation for lock order requirements.
        pending_background_events: Mutex<Vec<BackgroundEvent>>,
        /// Used when we have to take a BIG lock to make sure everything is self-consistent.
@@ -1696,30 +1698,47 @@ macro_rules! handle_new_monitor_update {
 
 macro_rules! process_events_body {
        ($self: expr, $event_to_handle: expr, $handle_event: expr) => {
-               // We'll acquire our total consistency lock until the returned future completes so that
-               // we can be sure no other persists happen while processing events.
-               let _read_guard = $self.total_consistency_lock.read().unwrap();
+               let mut processed_all_events = false;
+               while !processed_all_events {
+                       if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
+                               return;
+                       }
 
-               let mut result = NotifyOption::SkipPersist;
+                       let mut result = NotifyOption::SkipPersist;
 
-               // TODO: This behavior should be documented. It's unintuitive that we query
-               // ChannelMonitors when clearing other events.
-               if $self.process_pending_monitor_events() {
-                       result = NotifyOption::DoPersist;
-               }
+                       {
+                               // We'll acquire our total consistency lock so that we can be sure no other
+                               // persists happen while processing monitor events.
+                               let _read_guard = $self.total_consistency_lock.read().unwrap();
+
+                               // TODO: This behavior should be documented. It's unintuitive that we query
+                               // ChannelMonitors when clearing other events.
+                               if $self.process_pending_monitor_events() {
+                                       result = NotifyOption::DoPersist;
+                               }
+                       }
 
-               let pending_events = mem::replace(&mut *$self.pending_events.lock().unwrap(), vec![]);
-               if !pending_events.is_empty() {
-                       result = NotifyOption::DoPersist;
-               }
+                       let pending_events = $self.pending_events.lock().unwrap().clone();
+                       let num_events = pending_events.len();
+                       if !pending_events.is_empty() {
+                               result = NotifyOption::DoPersist;
+                       }
 
-               for event in pending_events {
-                       $event_to_handle = event;
-                       $handle_event;
-               }
+                       for event in pending_events {
+                               $event_to_handle = event;
+                               $handle_event;
+                       }
 
-               if result == NotifyOption::DoPersist {
-                       $self.persistence_notifier.notify();
+                       {
+                               let mut pending_events = $self.pending_events.lock().unwrap();
+                               pending_events.drain(..num_events);
+                               processed_all_events = pending_events.is_empty();
+                               $self.pending_events_processor.store(false, Ordering::Release);
+                       }
+
+                       if result == NotifyOption::DoPersist {
+                               $self.persistence_notifier.notify();
+                       }
                }
        }
 }
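
For readers unfamiliar with the pattern, here is a minimal standalone sketch of the guard the new macro body implements, assuming a simplified `EventQueue` type (the type, its fields, and the `u32` event type are illustrative, not LDK API): a single `AtomicBool` elects one processor at a time, and the winner loops until no events were queued behind its back.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;

// Illustrative only: mirrors the compare_exchange/while loop above, not the real macro.
struct EventQueue {
    events: Mutex<Vec<u32>>,
    processor: AtomicBool,
}

impl EventQueue {
    fn process_events(&self, mut handle_event: impl FnMut(u32)) {
        let mut processed_all_events = false;
        while !processed_all_events {
            // Only one caller may win the flag; everyone else returns immediately.
            if self.processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
                return;
            }
            // Snapshot the queue; like the macro, we clone rather than drain so events
            // are only removed after they have actually been handled.
            let pending = self.events.lock().unwrap().clone();
            let num_events = pending.len();
            for event in pending {
                handle_event(event);
            }
            // Drop exactly the events we handled, then release the flag; if more events
            // arrived in the meantime, loop and process them as well.
            let mut queue = self.events.lock().unwrap();
            queue.drain(..num_events);
            processed_all_events = queue.is_empty();
            self.processor.store(false, Ordering::Release);
        }
    }
}
```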
@@ -1787,6 +1806,7 @@ where
                        per_peer_state: FairRwLock::new(HashMap::new()),
 
                        pending_events: Mutex::new(Vec::new()),
+                       pending_events_processor: AtomicBool::new(false),
                        pending_background_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
                        persistence_notifier: Notifier::new(),
@@ -1834,6 +1854,10 @@ where
        /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
        /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
        ///
+       /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be opened due to failing to
+       /// generate a shutdown scriptpubkey or destination script set by
+       /// [`SignerProvider::get_shutdown_scriptpubkey`] or [`SignerProvider::get_destination_script`].
+       ///
        /// Note that we do not check if you are currently connected to the given peer. If no
        /// connection is available, the outbound `open_channel` message may fail to send, resulting in
        /// the channel eventually being silently forgotten (dropped on reload).
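
A hedged caller-side sketch of the new failure mode on channel open; the `Result<[u8; 32], APIError>` return type of `create_channel` is assumed from this LDK release, and `handle_open_result` is a hypothetical helper, not part of the API.

```rust
use lightning::util::errors::APIError;

// Hypothetical helper: branch on the error returned by ChannelManager::create_channel.
fn handle_open_result(res: Result<[u8; 32], APIError>) {
    match res {
        Ok(temporary_channel_id) => {
            // Proceed to handle Event::FundingGenerationReady as usual.
            println!("channel pending, temporary id {:?}", temporary_channel_id);
        }
        Err(APIError::ChannelUnavailable { err }) => {
            // New with this change: SignerProvider::get_shutdown_scriptpubkey or
            // get_destination_script failed, so no channel was opened.
            eprintln!("signer could not provide required scripts: {}", err);
        }
        Err(e) => eprintln!("open_channel failed: {:?}", e),
    }
}
```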
@@ -2078,6 +2102,11 @@ where
        ///
        /// May generate a [`SendShutdown`] message event on success, which should be relayed.
        ///
+       /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
+       /// generate a shutdown scriptpubkey or destination script set by
+       /// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
+       /// channel.
+       ///
        /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
@@ -2102,6 +2131,11 @@ where
        ///
        /// May generate a [`SendShutdown`] message event on success, which should be relayed.
        ///
+       /// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
+       /// generate a shutdown scriptpubkey or destination script set by
+       /// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
+       /// channel.
+       ///
        /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
        /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
        /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
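
Both close variants gain the same error. A hedged sketch of the fallback the new doc text suggests, with the real `close_channel` / `force_close_broadcasting_latest_txn` calls abstracted as closures so the helper stays self-contained (`close_or_force_close` itself is hypothetical, not LDK API):

```rust
use lightning::util::errors::APIError;

// Hedged sketch: try a cooperative close, fall back to force-closing if the signer
// cannot produce a shutdown scriptpubkey (the failure mode documented above).
fn close_or_force_close(
    close: impl FnOnce() -> Result<(), APIError>,
    force_close: impl FnOnce() -> Result<(), APIError>,
) -> Result<(), APIError> {
    match close() {
        Ok(()) => Ok(()), // SendShutdown was queued; relay it to the peer.
        Err(APIError::ChannelUnavailable { err }) => {
            // A cooperative close is not possible; broadcast the latest commitment instead.
            eprintln!("cooperative close unavailable ({}), force-closing", err);
            force_close()
        }
        Err(e) => Err(e),
    }
}
```

Used, for example, as `close_or_force_close(|| cm.close_channel(&chan_id, &peer), || cm.force_close_broadcasting_latest_txn(&chan_id, &peer))`.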
@@ -3006,10 +3040,11 @@ where
                }
                {
                        let height = self.best_block.read().unwrap().height();
-                       // Transactions are evaluated as final by network mempools at the next block. However, the modules
-                       // constituting our Lightning node might not have perfect sync about their blockchain views. Thus, if
-                       // the wallet module is in advance on the LDK view, allow one more block of headroom.
-                       if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
+                       // Transactions are evaluated as final by network mempools if their locktime is strictly
+                       // lower than the next block height. However, the modules constituting our Lightning
+                       // node might not have perfect sync about their blockchain views. Thus, if the wallet
+                       // module is ahead of LDK, only allow one more block of headroom.
+                       if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 1 {
                                return Err(APIError::APIMisuseError {
                                        err: "Funding transaction absolute timelock is non-final".to_owned()
                                });
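
The tightened bound encodes the standard finality rule: a block-height locktime is final for the next block (height `h + 1`) when it is `<= h`, so accepting values up to `h + 1` grants exactly one block of headroom for a wallet whose chain view is one block ahead of LDK's. An illustrative helper (not LDK API) restating the check:

```rust
// Illustrative restatement of the acceptance rule for block-height locktimes.
// `current_height` is LDK's current best-block height.
fn funding_locktime_acceptable(locktime: u32, current_height: u32, all_inputs_final: bool) -> bool {
    // Inputs with Sequence::MAX disable locktime enforcement entirely.
    if all_inputs_final {
        return true;
    }
    // Final for the next block when locktime <= current_height; permit one extra
    // block in case the wallet module is one block ahead of LDK.
    locktime <= current_height + 1
}

#[test]
fn headroom() {
    assert!(funding_locktime_acceptable(100, 100, false));  // final now
    assert!(funding_locktime_acceptable(101, 100, false));  // one block of headroom
    assert!(!funding_locktime_acceptable(102, 100, false)); // rejected as non-final
}
```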
@@ -8029,6 +8064,7 @@ where
                        per_peer_state: FairRwLock::new(per_peer_state),
 
                        pending_events: Mutex::new(pending_events_read),
+                       pending_events_processor: AtomicBool::new(false),
                        pending_background_events: Mutex::new(pending_background_events),
                        total_consistency_lock: RwLock::new(()),
                        persistence_notifier: Notifier::new(),
@@ -8060,8 +8096,6 @@ mod tests {
        use bitcoin::hashes::Hash;
        use bitcoin::hashes::sha256::Hash as Sha256;
        use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
-       #[cfg(feature = "std")]
-       use core::time::Duration;
        use core::sync::atomic::Ordering;
        use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
        use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
@@ -9038,7 +9072,7 @@ pub mod bench {
                // calls per node.
                let network = bitcoin::Network::Testnet;
 
-               let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
+               let tx_broadcaster = test_utils::TestBroadcaster::new(network);
                let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
                let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
                let scorer = Mutex::new(test_utils::TestScorer::new());