/// * Monitoring whether the ChannelManager needs to be re-persisted to disk, and if so,
/// writing it to disk/backups by invoking the callback given to it at startup.
/// ChannelManager persistence should be done in the background.
-/// * Calling `ChannelManager::timer_chan_freshness_every_min()` every minute (can be done in the
+/// * Calling `ChannelManager::timer_tick_occurred()` and
+/// `PeerManager::timer_tick_occurred()` every minute (can be done in the
/// background).
///
/// Note that if ChannelManager persistence fails and the persisted manager becomes out-of-date,
}
#[cfg(not(test))]
-const CHAN_FRESHNESS_TIMER: u64 = 60;
+const FRESHNESS_TIMER: u64 = 60;
#[cfg(test)]
-const CHAN_FRESHNESS_TIMER: u64 = 1;
+const FRESHNESS_TIMER: u64 = 1;
impl BackgroundProcessor {
/// Start a background thread that takes care of responsibilities enumerated in the top-level
log_trace!(logger, "Terminating background processor.");
return Ok(());
}
- if current_time.elapsed().as_secs() > CHAN_FRESHNESS_TIMER {
- log_trace!(logger, "Calling manager's timer_chan_freshness_every_min");
- channel_manager.timer_chan_freshness_every_min();
+ if current_time.elapsed().as_secs() > FRESHNESS_TIMER {
+ log_trace!(logger, "Calling ChannelManager's and PeerManager's timer_tick_occurred");
+ channel_manager.timer_tick_occurred();
+ peer_manager.timer_tick_occurred();
current_time = Instant::now();
}
}
}
#[test]
- fn test_chan_freshness_called() {
- // Test that ChannelManager's `timer_chan_freshness_every_min` is called every
- // `CHAN_FRESHNESS_TIMER`.
- let nodes = create_nodes(1, "test_chan_freshness_called".to_string());
+ fn test_timer_tick_called() {
+ // Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every
+ // `FRESHNESS_TIMER`.
+ let nodes = create_nodes(1, "test_timer_tick_called".to_string());
let data_dir = nodes[0].persister.get_data_dir();
let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
loop {
let log_entries = nodes[0].logger.lines.lock().unwrap();
- let desired_log = "Calling manager's timer_chan_freshness_every_min".to_string();
+ let desired_log = "Calling ChannelManager's and PeerManager's timer_tick_occurred".to_string();
if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() {
break
}
extern crate bitcoin;
extern crate libc;
-use bitcoin::{BlockHash, Txid};
+use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::hashes::hex::{FromHex, ToHex};
use crate::util::DiskWriteable;
use lightning::chain;
use lightning::ln::channelmanager::ChannelManager;
use lightning::util::logger::Logger;
use lightning::util::ser::{ReadableArgs, Writeable};
-use std::collections::HashMap;
use std::fs;
use std::io::{Cursor, Error};
use std::ops::Deref;
use std::path::{Path, PathBuf};
-use std::sync::Arc;
/// FilesystemPersister persists channel data on disk, where each channel's
/// data is stored in a file named after its funding outpoint.
}
}
-impl<Signer: Sign, M, T, K, F, L> DiskWriteable for ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>
-where M: chain::Watch<Signer>,
- T: BroadcasterInterface,
- K: KeysInterface<Signer=Signer>,
- F: FeeEstimator,
- L: Logger,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> DiskWriteable for ChannelManager<Signer, M, T, K, F, L>
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer=Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
fn write_to_file(&self, writer: &mut fs::File) -> Result<(), std::io::Error> {
self.write(writer)
/// Writes the provided `ChannelManager` to the path provided at `FilesystemPersister`
/// initialization, within a file called "manager".
- pub fn persist_manager<Signer, M, T, K, F, L>(
+ pub fn persist_manager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
data_dir: String,
- manager: &ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>
+ manager: &ChannelManager<Signer, M, T, K, F, L>
) -> Result<(), std::io::Error>
- where Signer: Sign,
- M: chain::Watch<Signer>,
- T: BroadcasterInterface,
- K: KeysInterface<Signer=Signer>,
- F: FeeEstimator,
- L: Logger
+ where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer=Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
let path = PathBuf::from(data_dir);
util::write_to_file(path, "manager".to_string(), manager)
/// Read `ChannelMonitor`s from disk.
pub fn read_channelmonitors<Signer: Sign, K: Deref> (
&self, keys_manager: K
- ) -> Result<HashMap<OutPoint, (BlockHash, ChannelMonitor<Signer>)>, std::io::Error>
- where K::Target: KeysInterface<Signer=Signer> + Sized
+ ) -> Result<Vec<(BlockHash, ChannelMonitor<Signer>)>, std::io::Error>
+ where K::Target: KeysInterface<Signer=Signer> + Sized,
{
let path = self.path_to_monitor_data();
if !Path::new(&path).exists() {
- return Ok(HashMap::new());
+ return Ok(Vec::new());
}
- let mut outpoint_to_channelmonitor = HashMap::new();
+ let mut res = Vec::new();
for file_option in fs::read_dir(path).unwrap() {
let file = file_option.unwrap();
let owned_file_name = file.file_name();
let mut buffer = Cursor::new(&contents);
match <(BlockHash, ChannelMonitor<Signer>)>::read(&mut buffer, &*keys_manager) {
Ok((blockhash, channel_monitor)) => {
- outpoint_to_channelmonitor.insert(
- OutPoint { txid: txid.unwrap(), index: index.unwrap() },
- (blockhash, channel_monitor),
- );
+ if channel_monitor.get_funding_txo().0.txid != txid.unwrap() || channel_monitor.get_funding_txo().0.index != index.unwrap() {
+ return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "ChannelMonitor was stored in the wrong file"));
+ }
+ res.push((blockhash, channel_monitor));
}
Err(e) => return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
))
}
}
- Ok(outpoint_to_channelmonitor)
+ Ok(res)
}
}
fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
util::write_to_file(self.path_to_monitor_data(), filename, monitor)
- .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
+ .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
}
fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
util::write_to_file(self.path_to_monitor_data(), filename, monitor)
- .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
+ .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
}
}
// Check that the persisted channel data is empty before any channels are
// open.
let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
- assert_eq!(persisted_chan_data_0.keys().len(), 0);
+ assert_eq!(persisted_chan_data_0.len(), 0);
let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
- assert_eq!(persisted_chan_data_1.keys().len(), 0);
+ assert_eq!(persisted_chan_data_1.len(), 0);
// Helper to make sure the channel is on the expected update ID.
macro_rules! check_persisted_data {
($expected_update_id: expr) => {
persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
- assert_eq!(persisted_chan_data_0.keys().len(), 1);
- for (_, mon) in persisted_chan_data_0.values() {
+ assert_eq!(persisted_chan_data_0.len(), 1);
+ for (_, mon) in persisted_chan_data_0.iter() {
assert_eq!(mon.get_latest_update_id(), $expected_update_id);
}
persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
- assert_eq!(persisted_chan_data_1.keys().len(), 1);
- for (_, mon) in persisted_chan_data_1.values() {
+ assert_eq!(persisted_chan_data_1.len(), 1);
+ for (_, mon) in persisted_chan_data_1.iter() {
assert_eq!(mon.get_latest_update_id(), $expected_update_id);
}
}
}
/// Events which we process internally but cannot be processed immediately at the generation site
-/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+/// for some reason. They are handled in timer_tick_occurred, so may be processed with
/// quite some time lag.
enum BackgroundEvent {
/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
-/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
+/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
///
/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
/// or a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
events.append(&mut new_events);
}
- /// Free the background events, generally called from timer_chan_freshness_every_min.
+ /// Free the background events, generally called from timer_tick_occurred.
///
/// Exposed for testing to allow us to process events quickly without generating accidental
- /// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+ /// BroadcastChannelUpdate events in timer_tick_occurred.
///
/// Expects the caller to have a total_consistency_lock read lock.
fn process_background_events(&self) {
/// This method handles all the details, and must be called roughly once per minute.
///
/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
- pub fn timer_chan_freshness_every_min(&self) {
+ pub fn timer_tick_occurred(&self) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
self.process_background_events();
// We cannot broadcast our latest local state via monitor update (as
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
- // timer_chan_freshness_every_min, guaranteeing we're running normally.
+ // timer_tick_occurred, guaranteeing we're running normally.
if let Some((funding_txo, update)) = failure.0.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {