From: Matt Corallo
Date: Thu, 18 Jul 2019 22:13:28 +0000 (-0400)
Subject: Drop system clock calls for PendingHTLCsForwardable events.
X-Git-Tag: v0.0.12~208^2
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=e2a9ed7265680cc821f29860264c7f4d53e044b9;p=rust-lightning

Drop system clock calls for PendingHTLCsForwardable events.

Instead, return a Duration and let the user do the work of waiting.
This is one of only a handful of steps to make us mostly-syscall-free,
at least enough to run in WASM according to elichai.
---

diff --git a/src/ln/chanmon_update_fail_tests.rs b/src/ln/chanmon_update_fail_tests.rs
index 69e056217..322ec7d8c 100644
--- a/src/ln/chanmon_update_fail_tests.rs
+++ b/src/ln/chanmon_update_fail_tests.rs
@@ -13,8 +13,6 @@ use util::errors::APIError;
 use bitcoin_hashes::sha256::Hash as Sha256;
 use bitcoin_hashes::Hash;
 
-use std::time::Instant;
-
 use ln::functional_test_utils::*;
 
 #[test]
@@ -1495,7 +1493,6 @@ fn test_monitor_update_on_pending_forwards() {
 		Event::PendingHTLCsForwardable { .. } => { },
 		_ => panic!("Unexpected event"),
 	};
-	nodes[0].node.channel_state.lock().unwrap().next_forward = Instant::now();
 	nodes[0].node.process_pending_htlc_forwards();
 
 	expect_payment_received!(nodes[0], payment_hash_2, 1000000);
diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index 3ffd080e9..2238a09bf 100644
--- a/src/ln/channelmanager.rs
+++ b/src/ln/channelmanager.rs
@@ -46,7 +46,7 @@ use std::collections::{HashMap, hash_map, HashSet};
 use std::io::Cursor;
 use std::sync::{Arc, Mutex, MutexGuard, RwLock};
 use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::{Instant,Duration};
+use std::time::Duration;
 
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
@@ -247,7 +247,6 @@ pub(super) enum RAACommitmentOrder {
 pub(super) struct ChannelHolder {
 	pub(super) by_id: HashMap<[u8; 32], Channel>,
 	pub(super) short_to_id: HashMap,
-	pub(super) next_forward: Instant,
 	/// short channel id -> forward infos. Key of 0 means payments received
 	/// Note that while this is held in the same mutex as the channels themselves, no consistency
 	/// guarantees are made about the existence of a channel with the short id here, nor the short
@@ -266,7 +265,6 @@ pub(super) struct ChannelHolder {
 pub(super) struct MutChannelHolder<'a> {
 	pub(super) by_id: &'a mut HashMap<[u8; 32], Channel>,
 	pub(super) short_to_id: &'a mut HashMap,
-	pub(super) next_forward: &'a mut Instant,
 	pub(super) forward_htlcs: &'a mut HashMap>,
 	pub(super) claimable_htlcs: &'a mut HashMap>,
 	pub(super) pending_msg_events: &'a mut Vec,
@@ -276,7 +274,6 @@ impl ChannelHolder {
 		MutChannelHolder {
 			by_id: &mut self.by_id,
 			short_to_id: &mut self.short_to_id,
-			next_forward: &mut self.next_forward,
 			forward_htlcs: &mut self.forward_htlcs,
 			claimable_htlcs: &mut self.claimable_htlcs,
 			pending_msg_events: &mut self.pending_msg_events,
@@ -549,7 +546,6 @@ impl ChannelManager {
 			channel_state: Mutex::new(ChannelHolder{
 				by_id: HashMap::new(),
 				short_to_id: HashMap::new(),
-				next_forward: Instant::now(),
 				forward_htlcs: HashMap::new(),
 				claimable_htlcs: HashMap::new(),
 				pending_msg_events: Vec::new(),
@@ -1184,10 +1180,6 @@ impl ChannelManager {
 		let mut channel_state_lock = self.channel_state.lock().unwrap();
 		let channel_state = channel_state_lock.borrow_parts();
 
-		if cfg!(not(feature = "fuzztarget")) && Instant::now() < *channel_state.next_forward {
-			return;
-		}
-
 		for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
 			if short_chan_id != 0 {
 				let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
@@ -1467,8 +1459,7 @@ impl ChannelManager {
 
 		let mut forward_event = None;
 		if channel_state_lock.forward_htlcs.is_empty() {
-			forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
-			channel_state_lock.next_forward = forward_event.unwrap();
+			forward_event = Some(Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
 		}
 		match channel_state_lock.forward_htlcs.entry(short_channel_id) {
 			hash_map::Entry::Occupied(mut entry) => {
@@ -2077,8 +2068,7 @@ impl ChannelManager {
 		if !pending_forwards.is_empty() {
 			let mut channel_state = self.channel_state.lock().unwrap();
 			if channel_state.forward_htlcs.is_empty() {
-				forward_event = Some(Instant::now() + Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
-				channel_state.next_forward = forward_event.unwrap();
+				forward_event = Some(Duration::from_millis(((rng::rand_f32() * 4.0 + 1.0) * MIN_HTLC_RELAY_HOLDING_CELL_MILLIS as f32) as u64));
 			}
 			for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
 				match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
@@ -3087,7 +3077,6 @@ impl<'a, R : ::std::io::Read> ReadableArgs> for (S
 		channel_state: Mutex::new(ChannelHolder {
 			by_id,
 			short_to_id,
-			next_forward: Instant::now(),
 			forward_htlcs,
 			claimable_htlcs,
 			pending_msg_events: Vec::new(),
diff --git a/src/ln/functional_test_utils.rs b/src/ln/functional_test_utils.rs
index 9a24d503c..ffa7e7f7f 100644
--- a/src/ln/functional_test_utils.rs
+++ b/src/ln/functional_test_utils.rs
@@ -32,7 +32,6 @@ use std::collections::HashMap;
 use std::default::Default;
 use std::rc::Rc;
 use std::sync::{Arc, Mutex};
-use std::time::Instant;
 use std::mem;
 
 pub const CHAN_CONFIRM_DEPTH: u32 = 100;
@@ -536,8 +535,6 @@ macro_rules! expect_pending_htlcs_forwardable {
 			Event::PendingHTLCsForwardable { .. } => { },
 			_ => panic!("Unexpected event"),
 		};
-		let node_ref: &Node = &$node;
-		node_ref.node.channel_state.lock().unwrap().next_forward = Instant::now();
 		$node.node.process_pending_htlc_forwards();
 	}}
 }
diff --git a/src/ln/functional_tests.rs b/src/ln/functional_tests.rs
index 00225f18b..6580abbc6 100644
--- a/src/ln/functional_tests.rs
+++ b/src/ln/functional_tests.rs
@@ -43,7 +43,6 @@ use std::collections::{BTreeSet, HashMap, HashSet};
 use std::default::Default;
 use std::sync::Arc;
 use std::sync::atomic::Ordering;
-use std::time::Instant;
 use std::mem;
 
 use ln::functional_test_utils::*;
@@ -2460,7 +2459,6 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
 			_ => panic!("Unexpected event"),
 		};
 	}
-	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
 	nodes[1].node.process_pending_htlc_forwards();
 
 	check_added_monitors!(nodes[1], 1);
@@ -2813,7 +2811,6 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
-	nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
 	nodes[1].node.process_pending_htlc_forwards();
 
 	let events_2 = nodes[1].node.get_and_clear_pending_events();
@@ -4463,7 +4460,6 @@ fn run_onion_failure_test_with_fail_intercept(_name: &str, test_case:
 	macro_rules! expect_htlc_forward {
 		($node: expr) => {{
 			expect_event!($node, Event::PendingHTLCsForwardable);
-			$node.node.channel_state.lock().unwrap().next_forward = Instant::now();
 			$node.node.process_pending_htlc_forwards();
 		}}
 	}
diff --git a/src/util/events.rs b/src/util/events.rs
index 8f8e7a52c..e4bed8f2e 100644
--- a/src/util/events.rs
+++ b/src/util/events.rs
@@ -21,7 +21,7 @@ use bitcoin::blockdata::script::Script;
 
 use secp256k1::key::PublicKey;
 
-use std::time::Instant;
+use std::time::Duration;
 
 /// An Event which you should probably take some action in response to.
 pub enum Event {
@@ -92,8 +92,8 @@ pub enum Event {
 	/// Used to indicate that ChannelManager::process_pending_htlc_forwards should be called at a
 	/// time in the future.
 	PendingHTLCsForwardable {
-		/// The earliest time at which process_pending_htlc_forwards should be called.
-		time_forwardable: Instant,
+		/// The amount of time that should be waited prior to calling process_pending_htlc_forwards
+		time_forwardable: Duration,
 	},
 	/// Used to indicate that an output was generated on-chain which you should know how to spend.
 	/// Such an output will *not* ever be spent by rust-lightning, so you need to store them
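
Usage sketch (not part of the commit): with this change a consumer of the event gets a relative
delay rather than a wall-clock Instant, and it is the embedding that decides how to wait. The loop
below assumes the usual EventsProvider polling pattern and uses std::thread::sleep for the wait;
the function name, loop structure and inline sleep are illustrative only, and a WASM host would
substitute its own timer for std::thread.

use std::thread;

use lightning::ln::channelmanager::ChannelManager;
use lightning::util::events::{Event, EventsProvider};

// Illustrative event loop: wait out the requested delay, then trigger the
// actual HTLC forwarding. The library's randomized delay obscures HTLC timing,
// so waiting at least `time_forwardable` before forwarding matters.
fn handle_events(channel_manager: &ChannelManager) {
	for event in channel_manager.get_and_clear_pending_events() {
		match event {
			Event::PendingHTLCsForwardable { time_forwardable } => {
				// The event now carries a Duration; the caller supplies the clock.
				thread::sleep(time_forwardable);
				channel_manager.process_pending_htlc_forwards();
			},
			_ => { /* other events: payments, funding, spendable outputs, ... */ },
		}
	}
}

Pushing the wait onto the caller is what removes the Instant::now() calls from the library itself,
which is the point of this change.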