X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning-persister%2Fsrc%2Flib.rs;h=8effedbce65d752e7ceaede1326c5a4304b6f056;hb=0a11eb19ab8922dae6a827192d65661265d516e4;hp=2226bcba6095373f7ae3dc2f7b3db2f7cf5e034c;hpb=b5d88a5422913a0a8950455c5f86764a042429d7;p=rust-lightning

diff --git a/lightning-persister/src/lib.rs b/lightning-persister/src/lib.rs
index 2226bcba..8effedbc 100644
--- a/lightning-persister/src/lib.rs
+++ b/lightning-persister/src/lib.rs
@@ -1,10 +1,19 @@
+//! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.
+
+#![deny(broken_intra_doc_links)]
+#![deny(missing_docs)]
+
+#![cfg_attr(all(test, feature = "unstable"), feature(test))]
+#[cfg(all(test, feature = "unstable"))] extern crate test;
+
 mod util;
 
 extern crate lightning;
 extern crate bitcoin;
 extern crate libc;
 
-use bitcoin::hashes::hex::ToHex;
+use bitcoin::{BlockHash, Txid};
+use bitcoin::hashes::hex::{FromHex, ToHex};
 use crate::util::DiskWriteable;
 use lightning::chain;
 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
@@ -14,20 +23,12 @@ use lightning::chain::keysinterface::{Sign, KeysInterface};
 use lightning::chain::transaction::OutPoint;
 use lightning::ln::channelmanager::ChannelManager;
 use lightning::util::logger::Logger;
-use lightning::util::ser::Writeable;
+use lightning::util::ser::{ReadableArgs, Writeable};
+use std::collections::HashMap;
 use std::fs;
-use std::io::Error;
-use std::path::PathBuf;
-use std::sync::Arc;
-
-#[cfg(test)]
-use {
-	lightning::util::ser::ReadableArgs,
-	bitcoin::{BlockHash, Txid},
-	bitcoin::hashes::hex::FromHex,
-	std::collections::HashMap,
-	std::io::Cursor
-};
+use std::io::{Cursor, Error};
+use std::ops::Deref;
+use std::path::{Path, PathBuf};
 
 /// FilesystemPersister persists channel data on disk, where each channel's
 /// data is stored in a file named after its funding outpoint.
@@ -51,12 +52,12 @@ impl<Signer: Sign> DiskWriteable for ChannelMonitor<Signer> {
 	}
 }
 
-impl<Signer: Sign, M, T, K, F, L> DiskWriteable for ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>
-where M: chain::Watch<Signer>,
-      T: BroadcasterInterface,
-      K: KeysInterface<Signer=Signer>,
-      F: FeeEstimator,
-      L: Logger,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> DiskWriteable for ChannelManager<Signer, M, T, K, F, L>
+where M::Target: chain::Watch<Signer>,
+      T::Target: BroadcasterInterface,
+      K::Target: KeysInterface<Signer=Signer>,
+      F::Target: FeeEstimator,
+      L::Target: Logger
 {
 	fn write_to_file(&self, writer: &mut fs::File) -> Result<(), std::io::Error> {
 		self.write(writer)
@@ -72,6 +73,7 @@ impl FilesystemPersister {
 		}
 	}
 
+	/// Get the directory which was provided when this persister was initialized.
 	pub fn get_data_dir(&self) -> String {
 		self.path_to_channel_data.clone()
 	}
@@ -84,57 +86,78 @@ impl FilesystemPersister {
 	/// Writes the provided `ChannelManager` to the path provided at `FilesystemPersister`
 	/// initialization, within a file called "manager".
-	pub fn persist_manager<Signer, M, T, K, F, L>(
+	pub fn persist_manager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
 		data_dir: String,
-		manager: &ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>
+		manager: &ChannelManager<Signer, M, T, K, F, L>
 	) -> Result<(), std::io::Error>
-	where Signer: Sign,
-	      M: chain::Watch<Signer>,
-	      T: BroadcasterInterface,
-	      K: KeysInterface<Signer=Signer>,
-	      F: FeeEstimator,
-	      L: Logger
+	where M::Target: chain::Watch<Signer>,
+	      T::Target: BroadcasterInterface,
+	      K::Target: KeysInterface<Signer=Signer>,
+	      F::Target: FeeEstimator,
+	      L::Target: Logger
 	{
 		let path = PathBuf::from(data_dir);
 		util::write_to_file(path, "manager".to_string(), manager)
 	}
 
-	#[cfg(test)]
-	fn load_channel_data<Keys: KeysInterface>(&self, keys: &Keys) ->
-		Result<HashMap<OutPoint, ChannelMonitor<Keys::Signer>>, ChannelMonitorUpdateErr> {
-		if let Err(_) = fs::create_dir_all(self.path_to_monitor_data()) {
-			return Err(ChannelMonitorUpdateErr::PermanentFailure);
-		}
+	/// Read `ChannelMonitor`s from disk.
+	pub fn read_channelmonitors<Signer: Sign, K: Deref> (
+		&self, keys_manager: K
+	) -> Result<HashMap<OutPoint, (BlockHash, ChannelMonitor<Signer>)>, std::io::Error>
+		where K::Target: KeysInterface<Signer=Signer> + Sized
+	{
+		let path = self.path_to_monitor_data();
+		if !Path::new(&path).exists() {
+			return Ok(HashMap::new());
+		}
+		let mut outpoint_to_channelmonitor = HashMap::new();
+		for file_option in fs::read_dir(path).unwrap() {
+			let file = file_option.unwrap();
+			let owned_file_name = file.file_name();
+			let filename = owned_file_name.to_str();
+			if !filename.is_some() || !filename.unwrap().is_ascii() || filename.unwrap().len() < 65 {
+				return Err(std::io::Error::new(
+					std::io::ErrorKind::InvalidData,
+					"Invalid ChannelMonitor file name",
+				));
+			}
-		let mut res = HashMap::new();
-		for file_option in fs::read_dir(self.path_to_monitor_data()).unwrap() {
-			let file = file_option.unwrap();
-			let owned_file_name = file.file_name();
-			let filename = owned_file_name.to_str();
-			if !filename.is_some() || !filename.unwrap().is_ascii() || filename.unwrap().len() < 65 {
-				return Err(ChannelMonitorUpdateErr::PermanentFailure);
-			}
-
-			let txid = Txid::from_hex(filename.unwrap().split_at(64).0);
-			if txid.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); }
-			let index = filename.unwrap().split_at(65).1.split('.').next().unwrap().parse();
-			if index.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); }
+			let txid = Txid::from_hex(filename.unwrap().split_at(64).0);
+			if txid.is_err() {
+				return Err(std::io::Error::new(
+					std::io::ErrorKind::InvalidData,
+					"Invalid tx ID in filename",
+				));
+			}
 
-			let contents = fs::read(&file.path());
-			if contents.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); }
+			let index = filename.unwrap().split_at(65).1.parse();
+			if index.is_err() {
+				return Err(std::io::Error::new(
+					std::io::ErrorKind::InvalidData,
+					"Invalid tx index in filename",
+				));
+			}
+
 
-			if let Ok((_, loaded_monitor)) =
-				<(BlockHash, ChannelMonitor<Keys::Signer>)>::read(&mut Cursor::new(&contents.unwrap()), keys) {
-				res.insert(OutPoint { txid: txid.unwrap(), index: index.unwrap() }, loaded_monitor);
-			} else {
-				return Err(ChannelMonitorUpdateErr::PermanentFailure);
-			}
+			let contents = fs::read(&file.path())?;
+			let mut buffer = Cursor::new(&contents);
+			match <(BlockHash, ChannelMonitor<Signer>)>::read(&mut buffer, &*keys_manager) {
+				Ok((blockhash, channel_monitor)) => {
+					outpoint_to_channelmonitor.insert(
+						OutPoint { txid: txid.unwrap(), index: index.unwrap() },
+						(blockhash, channel_monitor),
+					);
+				}
+				Err(e) => return Err(std::io::Error::new(
+					std::io::ErrorKind::InvalidData,
+					format!("Failed to deserialize ChannelMonitor: {}", e),
+				))
 			}
 		}
-		Ok(res)
+		Ok(outpoint_to_channelmonitor)
 	}
 }
 
-impl<ChannelSigner: Sign + Send + Sync> channelmonitor::Persist<ChannelSigner> for FilesystemPersister {
+impl<ChannelSigner: Sign> channelmonitor::Persist<ChannelSigner> for FilesystemPersister {
 	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
 		let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
 		util::write_to_file(self.path_to_monitor_data(), filename, monitor)
@@ -201,22 +224,22 @@ mod tests {
 
 		// Check that the persisted channel data is empty before any channels are
 		// open.
-		let mut persisted_chan_data_0 = persister_0.load_channel_data(nodes[0].keys_manager).unwrap();
+		let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
 		assert_eq!(persisted_chan_data_0.keys().len(), 0);
-		let mut persisted_chan_data_1 = persister_1.load_channel_data(nodes[1].keys_manager).unwrap();
+		let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
 		assert_eq!(persisted_chan_data_1.keys().len(), 0);
 
 		// Helper to make sure the channel is on the expected update ID.
 		macro_rules! check_persisted_data {
 			($expected_update_id: expr) => {
-				persisted_chan_data_0 = persister_0.load_channel_data(nodes[0].keys_manager).unwrap();
+				persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
 				assert_eq!(persisted_chan_data_0.keys().len(), 1);
-				for mon in persisted_chan_data_0.values() {
+				for (_, mon) in persisted_chan_data_0.values() {
 					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
 				}
-				persisted_chan_data_1 = persister_1.load_channel_data(nodes[1].keys_manager).unwrap();
+				persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
 				assert_eq!(persisted_chan_data_1.keys().len(), 1);
-				for mon in persisted_chan_data_1.values() {
+				for (_, mon) in persisted_chan_data_1.values() {
 					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
 				}
 			}
@@ -235,15 +258,15 @@ mod tests {
 		// Force close because cooperative close doesn't result in any persisted
 		// updates.
 		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
-		check_closed_broadcast!(nodes[0], false);
+		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
 
 		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
 		assert_eq!(node_txn.len(), 1);
 
-		let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-		connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]}, 1);
-		check_closed_broadcast!(nodes[1], false);
+		let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+		connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
+		check_closed_broadcast!(nodes[1], true);
 		check_added_monitors!(nodes[1], 1);
 
 		// Make sure everything is persisted as expected after close.
@@ -324,3 +347,15 @@ mod tests {
 		added_monitors.clear();
 	}
 }
+
+#[cfg(all(test, feature = "unstable"))]
+pub mod bench {
+	use test::Bencher;
+
+	#[bench]
+	fn bench_sends(bench: &mut Bencher) {
+		let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
+		let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
+		lightning::ln::channelmanager::bench::bench_two_sends(bench, persister_a, persister_b);
+	}
+}
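
The main type-level change in this diff replaces the concrete `Arc<M>`, `Arc<T>`, ... parameters on `ChannelManager` with plain type parameters bounded by `Deref` (e.g. `M: Deref where M::Target: chain::Watch<Signer>`). The sketch below only illustrates that bound style in isolation; the `Watchlike` trait and `use_watcher` function are hypothetical stand-ins using plain std, not code from rust-lightning. The practical effect is that callers may pass either a plain reference or an `Arc` rather than being forced to wrap everything in `Arc`:

	use std::ops::Deref;
	use std::sync::Arc;

	trait Watchlike { fn watch(&self); }

	struct W;
	impl Watchlike for W { fn watch(&self) {} }

	// Accept anything that derefs to a Watchlike, mirroring the
	// `M: Deref, M::Target: chain::Watch<Signer>` style the diff moves to.
	fn use_watcher<M: Deref>(watcher: M) where M::Target: Watchlike {
		watcher.watch();
	}

	fn main() {
		use_watcher(&W);          // a plain reference works
		use_watcher(Arc::new(W)); // and so does an Arc
	}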
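
The new `read_channelmonitors` relies on the naming convention used by `persist_new_channel` above: each monitor file is named `<funding_txid_hex>_<output_index>`. A minimal standalone sketch of that filename parsing follows; `parse_monitor_filename` is a hypothetical helper using only std, whereas the crate itself performs these checks inline via `split_at(64)`/`split_at(65)`:

	// Recover (txid hex, output index) from a monitor file name of the form
	// "<64 hex chars>_<index>", mirroring the checks in read_channelmonitors.
	fn parse_monitor_filename(filename: &str) -> Option<(String, u16)> {
		// A txid is 32 bytes = 64 hex characters, so anything shorter than
		// "<txid>_<one digit>" (65 chars) cannot name a monitor file.
		if !filename.is_ascii() || filename.len() < 65 {
			return None;
		}
		let (txid_hex, rest) = filename.split_at(64);
		if !rest.starts_with('_') {
			return None;
		}
		let index: u16 = rest[1..].parse().ok()?;
		Some((txid_hex.to_string(), index))
	}

	fn main() {
		let name = "ab".repeat(32) + "_1";
		assert_eq!(parse_monitor_filename(&name), Some(("ab".repeat(32), 1)));
	}

Encoding the funding outpoint in the file name gives each channel a stable, unique file and lets the reader validate an entry's name before attempting to deserialize its contents.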