X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-persister%2Fsrc%2Flib.rs;h=368945d1cfb489db52752a1e4ec6b4afedb0497d;hb=dba0709b084cd3c1f50f95afed5111e5f8afda39;hp=218366e7de4edea752a1d01258fa660e24d53479;hpb=ff00f6f8861419b73269e6c51d75ac9de75f1d1f;p=rust-lightning diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs index 218366e7..368945d1 100644 --- a/lightning-persister/src/lib.rs +++ b/lightning-persister/src/lib.rs @@ -1,10 +1,19 @@ +//! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs. + +#![deny(broken_intra_doc_links)] +#![deny(missing_docs)] + +#![cfg_attr(all(test, feature = "unstable"), feature(test))] +#[cfg(all(test, feature = "unstable"))] extern crate test; + mod util; extern crate lightning; extern crate bitcoin; extern crate libc; -use bitcoin::hashes::hex::ToHex; +use bitcoin::{BlockHash, Txid}; +use bitcoin::hashes::hex::{FromHex, ToHex}; use crate::util::DiskWriteable; use lightning::chain; use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; @@ -14,20 +23,14 @@ use lightning::chain::keysinterface::{Sign, KeysInterface}; use lightning::chain::transaction::OutPoint; use lightning::ln::channelmanager::ChannelManager; use lightning::util::logger::Logger; -use lightning::util::ser::Writeable; +use lightning::util::ser::{ReadableArgs, Writeable}; +use std::collections::HashMap; use std::fs; -use std::io::Error; +use std::io::{Cursor, Error}; +use std::ops::Deref; +use std::path::{Path, PathBuf}; use std::sync::Arc; -#[cfg(test)] -use { - lightning::util::ser::ReadableArgs, - bitcoin::{BlockHash, Txid}, - bitcoin::hashes::hex::FromHex, - std::collections::HashMap, - std::io::Cursor -}; - /// FilesystemPersister persists channel data on disk, where each channel's /// data is stored in a file named after its funding outpoint. /// @@ -51,7 +54,7 @@ impl DiskWriteable for ChannelMonitor { } impl DiskWriteable for ChannelManager, Arc, Arc, Arc, Arc> -where M: chain::Watch, +where M: chain::Watch, T: BroadcasterInterface, K: KeysInterface, F: FeeEstimator, @@ -71,10 +74,17 @@ impl FilesystemPersister { } } + /// Get the directory which was provided when this persister was initialized. pub fn get_data_dir(&self) -> String { self.path_to_channel_data.clone() } + pub(crate) fn path_to_monitor_data(&self) -> PathBuf { + let mut path = PathBuf::from(self.path_to_channel_data.clone()); + path.push("monitors"); + path + } + /// Writes the provided `ChannelManager` to the path provided at `FilesystemPersister` /// initialization, within a file called "manager". pub fn persist_manager( @@ -82,60 +92,83 @@ impl FilesystemPersister { manager: &ChannelManager, Arc, Arc, Arc, Arc> ) -> Result<(), std::io::Error> where Signer: Sign, - M: chain::Watch, + M: chain::Watch, T: BroadcasterInterface, K: KeysInterface, F: FeeEstimator, L: Logger { - util::write_to_file(data_dir, "manager".to_string(), manager) + let path = PathBuf::from(data_dir); + util::write_to_file(path, "manager".to_string(), manager) } - #[cfg(test)] - fn load_channel_data(&self, keys: &Keys) -> - Result>, ChannelMonitorUpdateErr> { - if let Err(_) = fs::create_dir_all(&self.path_to_channel_data) { - return Err(ChannelMonitorUpdateErr::PermanentFailure); + /// Read `ChannelMonitor`s from disk. 
+ pub fn read_channelmonitors ( + &self, keys_manager: K + ) -> Result)>, std::io::Error> + where K::Target: KeysInterface + Sized + { + let path = self.path_to_monitor_data(); + if !Path::new(&path).exists() { + return Ok(HashMap::new()); } - let mut res = HashMap::new(); - for file_option in fs::read_dir(&self.path_to_channel_data).unwrap() { + let mut outpoint_to_channelmonitor = HashMap::new(); + for file_option in fs::read_dir(path).unwrap() { let file = file_option.unwrap(); let owned_file_name = file.file_name(); let filename = owned_file_name.to_str(); if !filename.is_some() || !filename.unwrap().is_ascii() || filename.unwrap().len() < 65 { - return Err(ChannelMonitorUpdateErr::PermanentFailure); + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid ChannelMonitor file name", + )); } let txid = Txid::from_hex(filename.unwrap().split_at(64).0); - if txid.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); } - - let index = filename.unwrap().split_at(65).1.split('.').next().unwrap().parse(); - if index.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); } + if txid.is_err() { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid tx ID in filename", + )); + } - let contents = fs::read(&file.path()); - if contents.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); } + let index = filename.unwrap().split_at(65).1.parse(); + if index.is_err() { + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Invalid tx index in filename", + )); + } - if let Ok((_, loaded_monitor)) = - <(BlockHash, ChannelMonitor)>::read(&mut Cursor::new(&contents.unwrap()), keys) { - res.insert(OutPoint { txid: txid.unwrap(), index: index.unwrap() }, loaded_monitor); - } else { - return Err(ChannelMonitorUpdateErr::PermanentFailure); + let contents = fs::read(&file.path())?; + let mut buffer = Cursor::new(&contents); + match <(BlockHash, ChannelMonitor)>::read(&mut buffer, &*keys_manager) { + Ok((blockhash, channel_monitor)) => { + outpoint_to_channelmonitor.insert( + OutPoint { txid: txid.unwrap(), index: index.unwrap() }, + (blockhash, channel_monitor), + ); + } + Err(e) => return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + format!("Failed to deserialize ChannelMonitor: {}", e), + )) } } - Ok(res) + Ok(outpoint_to_channelmonitor) } } -impl channelmonitor::Persist for FilesystemPersister { - fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { +impl channelmonitor::Persist for FilesystemPersister { + fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index); - util::write_to_file(self.path_to_channel_data.clone(), filename, monitor) + util::write_to_file(self.path_to_monitor_data(), filename, monitor) .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure) } - fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { + fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> { let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index); - util::write_to_file(self.path_to_channel_data.clone(), filename, monitor) + util::write_to_file(self.path_to_monitor_data(), filename, 
monitor) .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure) } } @@ -193,22 +226,22 @@ mod tests { // Check that the persisted channel data is empty before any channels are // open. - let mut persisted_chan_data_0 = persister_0.load_channel_data(nodes[0].keys_manager).unwrap(); + let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap(); assert_eq!(persisted_chan_data_0.keys().len(), 0); - let mut persisted_chan_data_1 = persister_1.load_channel_data(nodes[1].keys_manager).unwrap(); + let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap(); assert_eq!(persisted_chan_data_1.keys().len(), 0); // Helper to make sure the channel is on the expected update ID. macro_rules! check_persisted_data { ($expected_update_id: expr) => { - persisted_chan_data_0 = persister_0.load_channel_data(nodes[0].keys_manager).unwrap(); + persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap(); assert_eq!(persisted_chan_data_0.keys().len(), 1); - for mon in persisted_chan_data_0.values() { + for (_, mon) in persisted_chan_data_0.values() { assert_eq!(mon.get_latest_update_id(), $expected_update_id); } - persisted_chan_data_1 = persister_1.load_channel_data(nodes[1].keys_manager).unwrap(); + persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap(); assert_eq!(persisted_chan_data_1.keys().len(), 1); - for mon in persisted_chan_data_1.values() { + for (_, mon) in persisted_chan_data_1.values() { assert_eq!(mon.get_latest_update_id(), $expected_update_id); } } @@ -227,15 +260,15 @@ mod tests { // Force close because cooperative close doesn't result in any persisted // updates. nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap(); - check_closed_broadcast!(nodes[0], false); + check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); - let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; - connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]}, 1); - check_closed_broadcast!(nodes[1], false); + let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; + connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]}); + check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); // Make sure everything is persisted as expected after close. @@ -316,3 +349,15 @@ mod tests { added_monitors.clear(); } } + +#[cfg(all(test, feature = "unstable"))] +pub mod bench { + use test::Bencher; + + #[bench] + fn bench_sends(bench: &mut Bencher) { + let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string()); + let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string()); + lightning::ln::channelmanager::bench::bench_two_sends(bench, persister_a, persister_b); + } +}
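
For reference, below is a minimal sketch of how the `read_channelmonitors` API added in this diff might be wired into a node's startup path. It is illustrative only: the `load_monitors` helper name, the elided construction of `keys_manager`, and the use of `InMemorySigner` as `KeysManager`'s signer type are assumptions, not something this diff defines; only `FilesystemPersister::new`, `read_channelmonitors`, and the on-disk layout ("monitors/<txid>_<index>") come from the change above.

// Illustrative sketch, not part of the diff. The concrete signer type and the
// way `keys_manager` is built are assumptions.
use std::collections::HashMap;

use bitcoin::BlockHash;
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::chain::keysinterface::{InMemorySigner, KeysManager};
use lightning::chain::transaction::OutPoint;
use lightning_persister::FilesystemPersister;

/// Hypothetical startup helper: read back every ChannelMonitor that the
/// `Persist` implementation previously wrote under `<data_dir>/monitors`,
/// keyed by funding outpoint, so they can be handed to a ChainMonitor.
fn load_monitors(
	data_dir: String,
	keys_manager: &KeysManager,
) -> Result<HashMap<OutPoint, (BlockHash, ChannelMonitor<InMemorySigner>)>, std::io::Error> {
	let persister = FilesystemPersister::new(data_dir);
	// File names are parsed back into funding outpoints; unreadable or
	// undeserializable files now surface as std::io::Error rather than
	// ChannelMonitorUpdateErr::PermanentFailure.
	persister.read_channelmonitors(keys_manager)
}

On the write side, `FilesystemPersister::persist_manager(data_dir, &manager)` still writes the `ChannelManager` to a file called "manager" directly under `data_dir`, while the `Persist` implementation now writes each monitor under the "monitors" subdirectory returned by `path_to_monitor_data()`.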