use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
+ #[cfg(feature = "std")]
use core::time::Duration;
use core::sync::atomic::Ordering;
use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
// All nodes start with a persistable update pending as `create_network` connects each node
// with all other nodes to make most tests simpler.
- assert!(nodes[0].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
- assert!(nodes[2].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
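+ // `poll_is_complete` checks whether the future has already completed, without blocking.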
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
&nodes[0].node.get_our_node_id()).pop().unwrap();
// The first two nodes (which opened a channel) should now require fresh persistence
- assert!(nodes[0].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
// ... but the last node should not.
- assert!(!nodes[2].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
+ assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
// After persisting the first two nodes they should no longer need fresh persistence.
- assert!(!nodes[0].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
// about the channel.
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
- assert!(!nodes[2].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
+ assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
// The nodes which are a party to the channel should also ignore messages from unrelated
// parties.
nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
- assert!(!nodes[0].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
// At this point the channel info given by peers should still be the same.
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
// persisted and that its channel info remains the same.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
- assert!(!nodes[0].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
- assert!(!nodes[1].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
+ assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
// the channel info has updated.
nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
- assert!(nodes[0].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
- assert!(nodes[1].node.get_persistable_update_future().wait_timeout(Duration::from_millis(1)));
+ assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
+ assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
}
Condvar { inner: StdCondvar::new() }
}
- pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
- let mutex: &'a Mutex<T> = guard.mutex;
- self.inner.wait(guard.into_inner()).map(|lock| MutexGuard { mutex, lock }).map_err(|_| ())
- }
-
pub fn wait_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, condition: F)
-> LockResult<MutexGuard<'a, T>> {
let mutex: &'a Mutex<T> = guard.mutex;
self.inner.wait_while(guard.into_inner(), condition).map(|lock| MutexGuard { mutex, lock })
    .map_err(|_| ())
}
- #[allow(unused)]
- pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
- let mutex = guard.mutex;
- self.inner.wait_timeout(guard.into_inner(), dur).map(|(lock, _)| (MutexGuard { mutex, lock }, ())).map_err(|_| ())
- }
-
#[allow(unused)]
pub fn wait_timeout_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, guard: MutexGuard<'a, T>, dur: Duration, condition: F)
-> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
pub use ::alloc::sync::Arc;
use core::ops::{Deref, DerefMut};
-use core::time::Duration;
use core::cell::{RefCell, Ref, RefMut};
use super::{LockTestExt, LockHeldState};
pub type LockResult<Guard> = Result<Guard, ()>;
-pub struct Condvar {}
-
-pub struct WaitTimeoutResult(bool);
-impl WaitTimeoutResult {
- pub fn timed_out(&self) -> bool { self.0 }
-}
-
-impl Condvar {
- pub fn new() -> Condvar {
- Condvar { }
- }
-
- pub fn wait<'a, T>(&'a self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
- Ok(guard)
- }
-
- #[allow(unused)]
- pub fn wait_timeout<'a, T>(&'a self, guard: MutexGuard<'a, T>, _dur: Duration) -> LockResult<(MutexGuard<'a, T>, ())> {
- Ok((guard, ()))
- }
-
- pub fn wait_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, mut guard: MutexGuard<'a, T>, mut condition: F)
- -> LockResult<MutexGuard<'a, T>> {
- assert!(!condition(&mut *guard));
- Ok(guard)
- }
-
- #[allow(unused)]
- pub fn wait_timeout_while<'a, T, F: FnMut(&mut T) -> bool>(&'a self, mut guard: MutexGuard<'a, T>, dur: Duration, mut condition: F)
- -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
- if condition(&mut *guard) {
- Ok((guard, WaitTimeoutResult(true)))
- } else {
- Ok((guard, WaitTimeoutResult(false)))
- }
- }
-
- pub fn notify_all(&self) {}
-}
-
pub struct Mutex<T: ?Sized> {
inner: RefCell<T>
}
use alloc::sync::Arc;
use core::mem;
-use crate::sync::{Condvar, Mutex};
+use crate::sync::Mutex;
use crate::prelude::*;
-#[cfg(any(test, feature = "std"))]
+#[cfg(feature = "std")]
+use crate::sync::Condvar;
+#[cfg(feature = "std")]
use std::time::Duration;
use core::future::Future as StdFuture;
}
/// Waits until this [`Future`] completes.
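+ ///
+ /// Note that this is only available when the `std` feature is enabled, as it blocks the
+ /// calling thread (via a [`Sleeper`]) until the [`Future`] completes.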
+ #[cfg(feature = "std")]
pub fn wait(self) {
Sleeper::from_single_future(self).wait();
}
/// Waits until this [`Future`] completes or the given amount of time has elapsed.
///
/// Returns true if the [`Future`] completed, false if the time elapsed.
- #[cfg(any(test, feature = "std"))]
+ #[cfg(feature = "std")]
pub fn wait_timeout(self, max_wait: Duration) -> bool {
Sleeper::from_single_future(self).wait_timeout(max_wait)
}
+
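+ /// Checks whether the [`Future`] has already completed, without blocking.
+ ///
+ /// If it has, the future's callbacks are marked as having been made. This is a test-only,
+ /// non-blocking alternative to [`Future::wait_timeout`].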
+ #[cfg(test)]
+ pub fn poll_is_complete(&self) -> bool {
+ let mut state = self.state.lock().unwrap();
+ if state.complete {
+ state.callbacks_made = true;
+ true
+ } else { false }
+ }
}
use core::task::Waker;
/// A struct which can be used to select across many [`Future`]s at once without relying on a full
/// async context.
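+///
+/// Note that this is only available when the `std` feature is enabled, as waiting blocks the
+/// calling thread.
+///
+/// A minimal usage sketch (assuming a [`Future`] named `future` obtained elsewhere, e.g. from a
+/// notifier):
+///
+/// ```ignore
+/// // Blocks until `future` completes, or gives up after 100ms; returns true if it completed.
+/// let sleeper = Sleeper::from_single_future(future);
+/// let completed = sleeper.wait_timeout(Duration::from_millis(100));
+/// ```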
+#[cfg(feature = "std")]
pub struct Sleeper {
notifiers: Vec<Arc<Mutex<FutureState>>>,
}
+#[cfg(feature = "std")]
impl Sleeper {
/// Constructs a new sleeper from one future, allowing blocking on it.
pub fn from_single_future(future: Future) -> Self {
/// Wait until one of the [`Future`]s registered with this [`Sleeper`] has completed or the
/// given amount of time has elapsed. Returns true if a [`Future`] completed, false if the time
/// elapsed.
- #[cfg(any(test, feature = "std"))]
pub fn wait_timeout(&self, max_wait: Duration) -> bool {
let (cv, notified_fut_mtx) = self.setup_wait();
let notified_fut =
}
#[test]
+ #[cfg(feature = "std")]
fn test_dropped_future_doesnt_count() {
// Tests that if a Future gets drop'd before it is poll()ed `Ready` it doesn't count as
// having been woken, leaving the notify-required flag set.
}
#[test]
+ #[cfg(feature = "std")]
fn test_multi_future_sleep() {
// Tests the `Sleeper` with multiple futures.
let notifier_a = Notifier::new();