git.bitcoin.ninja Git - rust-lightning/commitdiff
Migrate to `KVStore`/`FilesystemStore`
author     Elias Rohrer <dev@tnull.de>
           Tue, 1 Aug 2023 11:37:46 +0000 (13:37 +0200)
committer  Elias Rohrer <dev@tnull.de>
           Thu, 7 Sep 2023 20:49:22 +0000 (22:49 +0200)
First, we switch our `BackgroundProcessor` (BP) over to use `FilesystemStore`, which
also gives us test coverage and ensures compatibility.

Then, we remove the superseded `KVStorePersister` trait and
the `FilesystemPersister` code.
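
The new trait's surface is small. As a minimal sketch (not part of this patch), an in-memory `KVStore` implementation with the method signatures introduced below could look as follows; it assumes the `lightning` crate is built with its default `std` feature, so the `lightning::io` types behave like `std::io`:

```rust
// Illustrative only: an in-memory KVStore with the signatures this commit introduces.
use lightning::io;
use lightning::util::persist::KVStore;
use std::collections::HashMap;
use std::sync::Mutex;

#[derive(Default)]
struct MemoryStore {
	// Entries are keyed by (namespace, sub_namespace, key).
	entries: Mutex<HashMap<(String, String, String), Vec<u8>>>,
}

impl KVStore for MemoryStore {
	fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
		self.entries.lock().unwrap()
			.get(&(namespace.to_owned(), sub_namespace.to_owned(), key.to_owned()))
			.cloned()
			.ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
	}

	fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
		self.entries.lock().unwrap()
			.insert((namespace.to_owned(), sub_namespace.to_owned(), key.to_owned()), buf.to_vec());
		Ok(())
	}

	fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
		self.entries.lock().unwrap()
			.remove(&(namespace.to_owned(), sub_namespace.to_owned(), key.to_owned()));
		Ok(())
	}

	fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
		Ok(self.entries.lock().unwrap().keys()
			.filter(|(ns, sub, _)| ns == namespace && sub == sub_namespace)
			.map(|(_, _, k)| k.clone())
			.collect())
	}
}
```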

bench/benches/bench.rs
lightning-background-processor/src/lib.rs
lightning-persister/Cargo.toml
lightning-persister/src/lib.rs
lightning-persister/src/util.rs [deleted file]
lightning/src/util/persist.rs
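
The `lightning/src/util/persist.rs` hunks below replace the old free-form keys (`"manager"`, `"network_graph"`, `"scorer"`, `"monitors/…"`) with `(namespace, sub_namespace, key)` constants. As a hedged usage sketch (the constant and type names are taken from the diff; the helper function itself is hypothetical), reading the serialized `ChannelManager` bytes back out of a `FilesystemStore` would then look like:

```rust
// Sketch: objects are now addressed by (namespace, sub_namespace, key)
// via the constants added in this commit rather than bare string keys.
use lightning::io;
use lightning::util::persist::{
	KVStore, CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
	CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
};
use lightning_persister::fs_store::FilesystemStore;
use std::path::PathBuf;

// Hypothetical helper, not part of the patch.
fn read_manager_bytes(store: &FilesystemStore) -> io::Result<Vec<u8>> {
	store.read(
		CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
		CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
		CHANNEL_MANAGER_PERSISTENCE_KEY,
	)
}

fn main() {
	let store = FilesystemStore::new(PathBuf::from("ldk_data"));
	match read_manager_bytes(&store) {
		Ok(bytes) => println!("read {} bytes of ChannelManager data", bytes.len()),
		Err(e) => eprintln!("no persisted manager yet: {}", e),
	}
}
```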

index 54799f44c951422bf8f81bafd88e62dee1e4355c..3fc3abe687b13aafac41074dc891f169771dba14 100644 (file)
@@ -15,7 +15,6 @@ criterion_group!(benches,
        lightning::routing::router::benches::generate_large_mpp_routes_with_probabilistic_scorer,
        lightning::sign::benches::bench_get_secure_random_bytes,
        lightning::ln::channelmanager::bench::bench_sends,
-       lightning_persister::bench::bench_sends,
        lightning_rapid_gossip_sync::bench::bench_reading_full_graph_from_file,
        lightning::routing::gossip::benches::read_network_graph,
        lightning::routing::gossip::benches::write_network_graph);
index 8648920ec2c197be752ab4e8cc8959f08ee14d87..353ed6738d686698a9dd88079587316019f738d2 100644 (file)
@@ -500,9 +500,16 @@ use core::task;
 /// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
 /// could setup `process_events_async` like this:
 /// ```
-/// # struct MyPersister {}
-/// # impl lightning::util::persist::KVStorePersister for MyPersister {
-/// #     fn persist<W: lightning::util::ser::Writeable>(&self, key: &str, object: &W) -> lightning::io::Result<()> { Ok(()) }
+/// # use lightning::io;
+/// # use std::sync::{Arc, Mutex};
+/// # use std::sync::atomic::{AtomicBool, Ordering};
+/// # use lightning_background_processor::{process_events_async, GossipSync};
+/// # struct MyStore {}
+/// # impl lightning::util::persist::KVStore for MyStore {
+/// #     fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
+/// #     fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
+/// #     fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
+/// #     fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
 /// # }
 /// # struct MyEventHandler {}
 /// # impl MyEventHandler {
@@ -514,23 +521,20 @@ use core::task;
 /// #     fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
 /// #     fn disconnect_socket(&mut self) {}
 /// # }
-/// # use std::sync::{Arc, Mutex};
-/// # use std::sync::atomic::{AtomicBool, Ordering};
-/// # use lightning_background_processor::{process_events_async, GossipSync};
 /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
 /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
 /// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
 /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
 /// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
 /// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
-/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyPersister>>;
+/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
 /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
 /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
 /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
 /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
 /// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
 ///
-/// # async fn setup_background_processing(my_persister: Arc<MyPersister>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
+/// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
 ///    let background_persister = Arc::clone(&my_persister);
 ///    let background_event_handler = Arc::clone(&my_event_handler);
 ///    let background_chain_mon = Arc::clone(&my_chain_monitor);
@@ -866,8 +870,8 @@ mod tests {
        use lightning::util::config::UserConfig;
        use lightning::util::ser::Writeable;
        use lightning::util::test_utils;
-       use lightning::util::persist::KVStorePersister;
-       use lightning_persister::FilesystemPersister;
+       use lightning::util::persist::{KVStore, CHANNEL_MANAGER_PERSISTENCE_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, SCORER_PERSISTENCE_NAMESPACE, SCORER_PERSISTENCE_SUB_NAMESPACE, SCORER_PERSISTENCE_KEY};
+       use lightning_persister::fs_store::FilesystemStore;
        use std::collections::VecDeque;
        use std::{fs, env};
        use std::path::PathBuf;
@@ -906,7 +910,7 @@ mod tests {
                        >,
                        Arc<test_utils::TestLogger>>;
 
-       type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
+       type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
 
        type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
        type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
@@ -917,7 +921,7 @@ mod tests {
                rapid_gossip_sync: RGS,
                peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
                chain_monitor: Arc<ChainMonitor>,
-               persister: Arc<FilesystemPersister>,
+               kv_store: Arc<FilesystemStore>,
                tx_broadcaster: Arc<test_utils::TestBroadcaster>,
                network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
                logger: Arc<test_utils::TestLogger>,
@@ -941,9 +945,9 @@ mod tests {
 
        impl Drop for Node {
                fn drop(&mut self) {
-                       let data_dir = self.persister.get_data_dir();
+                       let data_dir = self.kv_store.get_data_dir();
                        match fs::remove_dir_all(data_dir.clone()) {
-                               Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
+                               Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
                                _ => {}
                        }
                }
@@ -954,13 +958,13 @@ mod tests {
                graph_persistence_notifier: Option<SyncSender<()>>,
                manager_error: Option<(std::io::ErrorKind, &'static str)>,
                scorer_error: Option<(std::io::ErrorKind, &'static str)>,
-               filesystem_persister: FilesystemPersister,
+               kv_store: FilesystemStore,
        }
 
        impl Persister {
-               fn new(data_dir: String) -> Self {
-                       let filesystem_persister = FilesystemPersister::new(data_dir);
-                       Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
+               fn new(data_dir: PathBuf) -> Self {
+                       let kv_store = FilesystemStore::new(data_dir);
+                       Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
                }
 
                fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
@@ -980,15 +984,25 @@ mod tests {
                }
        }
 
-       impl KVStorePersister for Persister {
-               fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
-                       if key == "manager" {
+       impl KVStore for Persister {
+               fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
+                       self.kv_store.read(namespace, sub_namespace, key)
+               }
+
+               fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
+                       if namespace == CHANNEL_MANAGER_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE &&
+                               key == CHANNEL_MANAGER_PERSISTENCE_KEY
+                       {
                                if let Some((error, message)) = self.manager_error {
                                        return Err(std::io::Error::new(error, message))
                                }
                        }
 
-                       if key == "network_graph" {
+                       if namespace == NETWORK_GRAPH_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE &&
+                               key == NETWORK_GRAPH_PERSISTENCE_KEY
+                       {
                                if let Some(sender) = &self.graph_persistence_notifier {
                                        match sender.send(()) {
                                                Ok(()) => {},
@@ -1001,13 +1015,24 @@ mod tests {
                                }
                        }
 
-                       if key == "scorer" {
+                       if namespace == SCORER_PERSISTENCE_NAMESPACE &&
+                               sub_namespace == SCORER_PERSISTENCE_SUB_NAMESPACE &&
+                               key == SCORER_PERSISTENCE_KEY
+                       {
                                if let Some((error, message)) = self.scorer_error {
                                        return Err(std::io::Error::new(error, message))
                                }
                        }
 
-                       self.filesystem_persister.persist(key, object)
+                       self.kv_store.write(namespace, sub_namespace, key, buf)
+               }
+
+               fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
+                       self.kv_store.remove(namespace, sub_namespace, key, lazy)
+               }
+
+               fn list(&self, namespace: &str, sub_namespace: &str) -> lightning::io::Result<Vec<String>> {
+                       self.kv_store.list(namespace, sub_namespace)
                }
        }
 
@@ -1157,10 +1182,10 @@ mod tests {
                        let seed = [i as u8; 32];
                        let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
                        let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
-                       let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", &persist_dir, i)));
+                       let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
                        let now = Duration::from_secs(genesis_block.header.time as u64);
                        let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
-                       let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
+                       let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
                        let best_block = BestBlock::from_network(network);
                        let params = ChainParameters { network, best_block };
                        let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
@@ -1172,7 +1197,7 @@ mod tests {
                                onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
                        };
                        let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
-                       let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
+                       let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
                        nodes.push(node);
                }
 
@@ -1267,7 +1292,7 @@ mod tests {
                let tx = open_channel!(nodes[0], nodes[1], 100000);
 
                // Initiate the background processors to watch each node.
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1332,7 +1357,7 @@ mod tests {
                // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
                // `PeerManager::timer_tick_occurred` every `PING_TIMER`.
                let (_, nodes) = create_nodes(1, "test_timer_tick_called");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1359,7 +1384,7 @@ mod tests {
                let (_, nodes) = create_nodes(2, "test_persist_error");
                open_channel!(nodes[0], nodes[1], 100000);
 
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1379,7 +1404,7 @@ mod tests {
                let (_, nodes) = create_nodes(2, "test_persist_error_sync");
                open_channel!(nodes[0], nodes[1], 100000);
 
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
 
                let bp_future = super::process_events_async(
@@ -1405,7 +1430,7 @@ mod tests {
        fn test_network_graph_persist_error() {
                // Test that if we encounter an error during network graph persistence, an error gets returned.
                let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1423,7 +1448,7 @@ mod tests {
        fn test_scorer_persist_error() {
                // Test that if we encounter an error during scorer persistence, an error gets returned.
                let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(),  nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1441,7 +1466,7 @@ mod tests {
        fn test_background_event_handling() {
                let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
                let channel_value = 100000;
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir.clone()));
 
                // Set up a background event handler for FundingGenerationReady events.
@@ -1514,7 +1539,7 @@ mod tests {
        #[test]
        fn test_scorer_persistence() {
                let (_, nodes) = create_nodes(2, "test_scorer_persistence");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let event_handler = |_: _| {};
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
@@ -1586,7 +1611,7 @@ mod tests {
                let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 
                let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
 
                let event_handler = |_: _| {};
@@ -1605,7 +1630,7 @@ mod tests {
                let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 
                let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
 
                let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
@@ -1745,7 +1770,7 @@ mod tests {
                };
 
                let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
                let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
@@ -1778,7 +1803,7 @@ mod tests {
                };
 
                let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
-               let data_dir = nodes[0].persister.get_data_dir();
+               let data_dir = nodes[0].kv_store.get_data_dir();
                let persister = Arc::new(Persister::new(data_dir));
 
                let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
index 271f3b882b3546603c79ecbfb478f483e54e95da..7ba4223227d323968cd738eaf9f84d58b3a245dc 100644 (file)
@@ -16,10 +16,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.29.0"
 lightning = { version = "0.0.116", path = "../lightning" }
-libc = "0.2"
 
 [target.'cfg(windows)'.dependencies]
-winapi = { version = "0.3", features = ["winbase"] }
 windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
 
 [target.'cfg(ldk_bench)'.dependencies]
index 932e4f41ad3cb549a35271538294060bcdbc19f7..ae258e137d742f32ce1067d6508ad133e02bd67a 100644 (file)
@@ -16,341 +16,3 @@ mod utils;
 
 #[cfg(test)]
 mod test_utils;
-
-mod util;
-
-extern crate lightning;
-extern crate bitcoin;
-extern crate libc;
-
-use bitcoin::hash_types::{BlockHash, Txid};
-use bitcoin::hashes::hex::FromHex;
-use lightning::chain::channelmonitor::ChannelMonitor;
-use lightning::sign::{EntropySource, SignerProvider};
-use lightning::util::ser::{ReadableArgs, Writeable};
-use lightning::util::persist::KVStorePersister;
-use std::fs;
-use std::io::Cursor;
-use std::ops::Deref;
-use std::path::{Path, PathBuf};
-
-/// FilesystemPersister persists channel data on disk, where each channel's
-/// data is stored in a file named after its funding outpoint.
-///
-/// Warning: this module does the best it can with calls to persist data, but it
-/// can only guarantee that the data is passed to the drive. It is up to the
-/// drive manufacturers to do the actual persistence properly, which they often
-/// don't (especially on consumer-grade hardware). Therefore, it is up to the
-/// user to validate their entire storage stack, to ensure the writes are
-/// persistent.
-/// Corollary: especially when dealing with larger amounts of money, it is best
-/// practice to have multiple channel data backups and not rely only on one
-/// FilesystemPersister.
-pub struct FilesystemPersister {
-       path_to_channel_data: String,
-}
-
-impl FilesystemPersister {
-       /// Initialize a new FilesystemPersister and set the path to the individual channels'
-       /// files.
-       pub fn new(path_to_channel_data: String) -> Self {
-               Self {
-                       path_to_channel_data,
-               }
-       }
-
-       /// Get the directory which was provided when this persister was initialized.
-       pub fn get_data_dir(&self) -> String {
-               self.path_to_channel_data.clone()
-       }
-
-       /// Read `ChannelMonitor`s from disk.
-       pub fn read_channelmonitors<ES: Deref, SP: Deref> (
-               &self, entropy_source: ES, signer_provider: SP
-       ) -> std::io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
-               where
-                       ES::Target: EntropySource + Sized,
-                       SP::Target: SignerProvider + Sized
-       {
-               let mut path = PathBuf::from(&self.path_to_channel_data);
-               path.push("monitors");
-               if !Path::new(&path).exists() {
-                       return Ok(Vec::new());
-               }
-               let mut res = Vec::new();
-               for file_option in fs::read_dir(path)? {
-                       let file = file_option.unwrap();
-                       let owned_file_name = file.file_name();
-                       let filename = owned_file_name.to_str()
-                               .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::InvalidData,
-                                       "File name is not a valid utf8 string"))?;
-                       if !filename.is_ascii() || filename.len() < 65 {
-                               return Err(std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid ChannelMonitor file name",
-                               ));
-                       }
-                       if filename.ends_with(".tmp") {
-                               // If we were in the middle of committing an new update and crashed, it should be
-                               // safe to ignore the update - we should never have returned to the caller and
-                               // irrevocably committed to the new state in any way.
-                               continue;
-                       }
-
-                       let txid: Txid = Txid::from_hex(filename.split_at(64).0)
-                               .map_err(|_| std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid tx ID in filename",
-                               ))?;
-
-                       let index: u16 = filename.split_at(65).1.parse()
-                               .map_err(|_| std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "Invalid tx index in filename",
-                               ))?;
-
-                       let contents = fs::read(&file.path())?;
-                       let mut buffer = Cursor::new(&contents);
-                       match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(&mut buffer, (&*entropy_source, &*signer_provider)) {
-                               Ok((blockhash, channel_monitor)) => {
-                                       if channel_monitor.get_funding_txo().0.txid != txid || channel_monitor.get_funding_txo().0.index != index {
-                                               return Err(std::io::Error::new(std::io::ErrorKind::InvalidData,
-                                                                              "ChannelMonitor was stored in the wrong file"));
-                                       }
-                                       res.push((blockhash, channel_monitor));
-                               }
-                               Err(e) => return Err(std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       format!("Failed to deserialize ChannelMonitor: {}", e),
-                               ))
-                       }
-               }
-               Ok(res)
-       }
-}
-
-impl KVStorePersister for FilesystemPersister {
-       fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
-               let mut dest_file = PathBuf::from(self.path_to_channel_data.clone());
-               dest_file.push(key);
-               util::write_to_file(dest_file, object)
-       }
-}
-
-#[cfg(test)]
-mod tests {
-       extern crate lightning;
-       extern crate bitcoin;
-       use crate::FilesystemPersister;
-       use bitcoin::hashes::hex::FromHex;
-       use bitcoin::Txid;
-       use lightning::chain::ChannelMonitorUpdateStatus;
-       use lightning::chain::chainmonitor::Persist;
-       use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
-       use lightning::chain::transaction::OutPoint;
-       use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
-       use lightning::events::{ClosureReason, MessageSendEventsProvider};
-       use lightning::ln::functional_test_utils::*;
-       use lightning::util::test_utils;
-       use std::fs;
-       #[cfg(target_os = "windows")]
-       use {
-               lightning::get_event_msg,
-               lightning::ln::msgs::ChannelMessageHandler,
-       };
-
-       impl Drop for FilesystemPersister {
-               fn drop(&mut self) {
-                       // We test for invalid directory names, so it's OK if directory removal
-                       // fails.
-                       match fs::remove_dir_all(&self.path_to_channel_data) {
-                               Err(e) => println!("Failed to remove test persister directory: {}", e),
-                               _ => {}
-                       }
-               }
-       }
-
-       #[test]
-       fn test_if_monitors_is_not_dir() {
-               let persister = FilesystemPersister::new("test_monitors_is_not_dir".to_string());
-
-               fs::create_dir_all(&persister.path_to_channel_data).unwrap();
-               let mut path = std::path::PathBuf::from(&persister.path_to_channel_data);
-               path.push("monitors");
-               fs::File::create(path).unwrap();
-
-               let chanmon_cfgs = create_chanmon_cfgs(1);
-               let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
-               let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister, node_cfgs[0].keys_manager);
-               node_cfgs[0].chain_monitor = chain_mon_0;
-               let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
-               let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
-
-               // Check that read_channelmonitors() returns error if monitors/ is not a
-               // directory.
-               assert!(persister.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).is_err());
-       }
-
-       // Integration-test the FilesystemPersister. Test relaying a few payments
-       // and check that the persisted data is updated the appropriate number of
-       // times.
-       #[test]
-       fn test_filesystem_persister() {
-               // Create the nodes, giving them FilesystemPersisters for data persisters.
-               let persister_0 = FilesystemPersister::new("test_filesystem_persister_0".to_string());
-               let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, node_cfgs[0].keys_manager);
-               let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, node_cfgs[1].keys_manager);
-               node_cfgs[0].chain_monitor = chain_mon_0;
-               node_cfgs[1].chain_monitor = chain_mon_1;
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-               // Check that the persisted channel data is empty before any channels are
-               // open.
-               let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
-               assert_eq!(persisted_chan_data_0.len(), 0);
-               let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
-               assert_eq!(persisted_chan_data_1.len(), 0);
-
-               // Helper to make sure the channel is on the expected update ID.
-               macro_rules! check_persisted_data {
-                       ($expected_update_id: expr) => {
-                               persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
-                               assert_eq!(persisted_chan_data_0.len(), 1);
-                               for (_, mon) in persisted_chan_data_0.iter() {
-                                       assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-                               }
-                               persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
-                               assert_eq!(persisted_chan_data_1.len(), 1);
-                               for (_, mon) in persisted_chan_data_1.iter() {
-                                       assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-                               }
-                       }
-               }
-
-               // Create some initial channel and check that a channel was persisted.
-               let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
-               check_persisted_data!(0);
-
-               // Send a few payments and make sure the monitors are updated to the latest.
-               send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
-               check_persisted_data!(5);
-               send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
-               check_persisted_data!(10);
-
-               // Force close because cooperative close doesn't result in any persisted
-               // updates.
-               nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
-               check_closed_broadcast!(nodes[0], true);
-               check_added_monitors!(nodes[0], 1);
-
-               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
-               assert_eq!(node_txn.len(), 1);
-
-               connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
-               check_closed_broadcast!(nodes[1], true);
-               check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
-               check_added_monitors!(nodes[1], 1);
-
-               // Make sure everything is persisted as expected after close.
-               check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
-       }
-
-       // Test that if the persister's path to channel data is read-only, writing a
-       // monitor to it results in the persister returning a PermanentFailure.
-       // Windows ignores the read-only flag for folders, so this test is Unix-only.
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_readonly_dir_perm_failure() {
-               let persister = FilesystemPersister::new("test_readonly_dir_perm_failure".to_string());
-               fs::create_dir_all(&persister.path_to_channel_data).unwrap();
-
-               // Set up a dummy channel and force close. This will produce a monitor
-               // that we can then use to test persistence.
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
-               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-
-               // Set the persister's directory to read-only, which should result in
-               // returning a permanent failure when we then attempt to persist a
-               // channel update.
-               let path = &persister.path_to_channel_data;
-               let mut perms = fs::metadata(path).unwrap().permissions();
-               perms.set_readonly(true);
-               fs::set_permissions(path, perms).unwrap();
-
-               let test_txo = OutPoint {
-                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
-                       index: 0
-               };
-               match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       ChannelMonitorUpdateStatus::PermanentFailure => {},
-                       _ => panic!("unexpected result from persisting new channel")
-               }
-
-               nodes[1].node.get_and_clear_pending_msg_events();
-               added_monitors.clear();
-       }
-
-       // Test that if a persister's directory name is invalid, monitor persistence
-       // will fail.
-       #[cfg(target_os = "windows")]
-       #[test]
-       fn test_fail_on_open() {
-               // Set up a dummy channel and force close. This will produce a monitor
-               // that we can then use to test persistence.
-               let chanmon_cfgs = create_chanmon_cfgs(2);
-               let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-               let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-               let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-               let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-               nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
-               check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
-               let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-
-               // Create the persister with an invalid directory name and test that the
-               // channel fails to open because the directories fail to be created. There
-               // don't seem to be invalid filename characters on Unix that Rust doesn't
-               // handle, hence why the test is Windows-only.
-               let persister = FilesystemPersister::new(":<>/".to_string());
-
-               let test_txo = OutPoint {
-                       txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
-                       index: 0
-               };
-               match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
-                       ChannelMonitorUpdateStatus::PermanentFailure => {},
-                       _ => panic!("unexpected result from persisting new channel")
-               }
-
-               nodes[1].node.get_and_clear_pending_msg_events();
-               added_monitors.clear();
-       }
-}
-
-#[cfg(ldk_bench)]
-/// Benches
-pub mod bench {
-       use criterion::Criterion;
-
-       /// Bench!
-       pub fn bench_sends(bench: &mut Criterion) {
-               let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
-               let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
-               lightning::ln::channelmanager::bench::bench_two_sends(
-                       bench, "bench_filesystem_persisted_sends", persister_a, persister_b);
-       }
-}
diff --git a/lightning-persister/src/util.rs b/lightning-persister/src/util.rs
deleted file mode 100644 (file)
index 20c4a81..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-#[cfg(target_os = "windows")]
-extern crate winapi;
-
-use std::fs;
-use std::path::PathBuf;
-use std::io::BufWriter;
-
-#[cfg(not(target_os = "windows"))]
-use std::os::unix::io::AsRawFd;
-
-use lightning::util::ser::Writeable;
-
-#[cfg(target_os = "windows")]
-use {
-       std::ffi::OsStr,
-       std::os::windows::ffi::OsStrExt
-};
-
-#[cfg(target_os = "windows")]
-macro_rules! call {
-       ($e: expr) => (
-               if $e != 0 {
-                       return Ok(())
-               } else {
-                       return Err(std::io::Error::last_os_error())
-               }
-       )
-}
-
-#[cfg(target_os = "windows")]
-fn path_to_windows_str<T: AsRef<OsStr>>(path: T) -> Vec<winapi::shared::ntdef::WCHAR> {
-       path.as_ref().encode_wide().chain(Some(0)).collect()
-}
-
-#[allow(bare_trait_objects)]
-pub(crate) fn write_to_file<W: Writeable>(dest_file: PathBuf, data: &W) -> std::io::Result<()> {
-       let mut tmp_file = dest_file.clone();
-       tmp_file.set_extension("tmp");
-
-       let parent_directory = dest_file.parent().unwrap();
-       fs::create_dir_all(parent_directory)?;
-       // Do a crazy dance with lots of fsync()s to be overly cautious here...
-       // We never want to end up in a state where we've lost the old data, or end up using the
-       // old data on power loss after we've returned.
-       // The way to atomically write a file on Unix platforms is:
-       // open(tmpname), write(tmpfile), fsync(tmpfile), close(tmpfile), rename(), fsync(dir)
-       {
-               // Note that going by rust-lang/rust@d602a6b, on MacOS it is only safe to use
-               // rust stdlib 1.36 or higher.
-               let mut buf = BufWriter::new(fs::File::create(&tmp_file)?);
-               data.write(&mut buf)?;
-               buf.into_inner()?.sync_all()?;
-       }
-       // Fsync the parent directory on Unix.
-       #[cfg(not(target_os = "windows"))]
-       {
-               fs::rename(&tmp_file, &dest_file)?;
-               let dir_file = fs::OpenOptions::new().read(true).open(parent_directory)?;
-               unsafe { libc::fsync(dir_file.as_raw_fd()); }
-       }
-       #[cfg(target_os = "windows")]
-       {
-               if dest_file.exists() {
-                       unsafe {winapi::um::winbase::ReplaceFileW(
-                               path_to_windows_str(dest_file).as_ptr(), path_to_windows_str(tmp_file).as_ptr(), std::ptr::null(),
-                               winapi::um::winbase::REPLACEFILE_IGNORE_MERGE_ERRORS,
-                               std::ptr::null_mut() as *mut winapi::ctypes::c_void,
-                               std::ptr::null_mut() as *mut winapi::ctypes::c_void
-                       )};
-               } else {
-                       call!(unsafe {winapi::um::winbase::MoveFileExW(
-                               path_to_windows_str(tmp_file).as_ptr(), path_to_windows_str(dest_file).as_ptr(),
-                               winapi::um::winbase::MOVEFILE_WRITE_THROUGH | winapi::um::winbase::MOVEFILE_REPLACE_EXISTING
-                       )});
-               }
-       }
-       Ok(())
-}
-
-#[cfg(test)]
-mod tests {
-       use lightning::util::ser::{Writer, Writeable};
-
-       use super::{write_to_file};
-       use std::fs;
-       use std::io;
-       use std::path::PathBuf;
-
-       struct TestWriteable{}
-       impl Writeable for TestWriteable {
-               fn write<W: Writer>(&self, writer: &mut W) -> Result<(), std::io::Error> {
-                       writer.write_all(&[42; 1])
-               }
-       }
-
-       // Test that if the persister's path to channel data is read-only, writing
-       // data to it fails. Windows ignores the read-only flag for folders, so this
-       // test is Unix-only.
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_readonly_dir() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_readonly_dir_persister_filename".to_string();
-               let path = "test_readonly_dir_persister_dir";
-               fs::create_dir_all(path).unwrap();
-               let mut perms = fs::metadata(path).unwrap().permissions();
-               perms.set_readonly(true);
-               fs::set_permissions(path, perms).unwrap();
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => assert_eq!(e.kind(), io::ErrorKind::PermissionDenied),
-                       _ => panic!("Unexpected error message")
-               }
-       }
-
-       // Test failure to rename in the process of atomically creating a channel
-       // monitor's file. We induce this failure by making the `tmp` file a
-       // directory.
-       // Explanation: given "from" = the file being renamed, "to" = the destination
-       // file that already exists: Unix should fail because if "from" is a file,
-       // then "to" is also required to be a file.
-       // TODO: ideally try to make this work on Windows again
-       #[cfg(not(target_os = "windows"))]
-       #[test]
-       fn test_rename_failure() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_rename_failure_filename";
-               let path = "test_rename_failure_dir";
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               // Create the channel data file and make it a directory.
-               fs::create_dir_all(dest_file.clone()).unwrap();
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => assert_eq!(e.raw_os_error(), Some(libc::EISDIR)),
-                       _ => panic!("Unexpected Ok(())")
-               }
-               fs::remove_dir_all(path).unwrap();
-       }
-
-       #[test]
-       fn test_diskwriteable_failure() {
-               struct FailingWriteable {}
-               impl Writeable for FailingWriteable {
-                       fn write<W: Writer>(&self, _writer: &mut W) -> Result<(), std::io::Error> {
-                               Err(std::io::Error::new(std::io::ErrorKind::Other, "expected failure"))
-                       }
-               }
-
-               let filename = "test_diskwriteable_failure";
-               let path = "test_diskwriteable_failure_dir";
-               let test_writeable = FailingWriteable{};
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => {
-                               assert_eq!(e.kind(), std::io::ErrorKind::Other);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "expected failure");
-                       },
-                       _ => panic!("unexpected result")
-               }
-               fs::remove_dir_all(path).unwrap();
-       }
-
-       // Test failure to create the temporary file in the persistence process.
-       // We induce this failure by having the temp file already exist and be a
-       // directory.
-       #[test]
-       fn test_tmp_file_creation_failure() {
-               let test_writeable = TestWriteable{};
-               let filename = "test_tmp_file_creation_failure_filename".to_string();
-               let path = "test_tmp_file_creation_failure_dir";
-               let mut dest_file = PathBuf::from(path);
-               dest_file.push(filename);
-               let mut tmp_file = dest_file.clone();
-               tmp_file.set_extension("tmp");
-               fs::create_dir_all(tmp_file).unwrap();
-               match write_to_file(dest_file, &test_writeable) {
-                       Err(e) => {
-                               #[cfg(not(target_os = "windows"))]
-                               assert_eq!(e.raw_os_error(), Some(libc::EISDIR));
-                               #[cfg(target_os = "windows")]
-                               assert_eq!(e.kind(), io::ErrorKind::PermissionDenied);
-                       }
-                       _ => panic!("Unexpected error message")
-               }
-       }
-}
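
The deleted `util.rs` above implemented the classic Unix atomic-write dance described in its comments (write a temp file, fsync it, rename it over the destination, then fsync the parent directory); `FilesystemStore` now provides this behavior. Purely for reference, a standalone sketch of that pattern, independent of LDK and omitting the Windows branch, might look like:

```rust
// Reference sketch of the atomic-write pattern the removed util.rs used
// (Unix only; the Windows path via ReplaceFileW/MoveFileExW is omitted).
use std::fs;
use std::io::Write;
use std::path::Path;

fn atomic_write(dest: &Path, data: &[u8]) -> std::io::Result<()> {
	let tmp = dest.with_extension("tmp");
	let parent = dest.parent().expect("destination must have a parent directory");
	fs::create_dir_all(parent)?;
	{
		let mut file = fs::File::create(&tmp)?;
		file.write_all(data)?;
		file.sync_all()?; // fsync(tmpfile) before the rename
	}
	fs::rename(&tmp, dest)?; // atomic replace on POSIX filesystems
	// fsync the parent directory so the rename itself is durable.
	fs::File::open(parent)?.sync_all()?;
	Ok(())
}
```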
index 5b122b7a5707cf990ae5126a862a596569ba47f8..ca0605c95983afd3b370cafddaf82de2868dadfb 100644 (file)
@@ -114,15 +114,6 @@ pub trait KVStore {
        fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>>;
 }
 
-/// Trait for a key-value store for persisting some writeable object at some key
-/// Implementing `KVStorePersister` provides auto-implementations for [`Persister`]
-/// and [`Persist`] traits.  It uses "manager", "network_graph",
-/// and "monitors/{funding_txo_id}_{funding_txo_index}" for keys.
-pub trait KVStorePersister {
-       /// Persist the given writeable using the provided key
-       fn persist<W: Writeable>(&self, key: &str, object: &W) -> io::Result<()>;
-}
-
 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
 pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
        where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
@@ -144,7 +135,8 @@ pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F:
        fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
 }
 
-impl<'a, A: KVStorePersister, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
+
+impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
        where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
@@ -154,39 +146,56 @@ impl<'a, A: KVStorePersister, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Dere
                R::Target: 'static + Router,
                L::Target: 'static + Logger,
 {
-       /// Persist the given ['ChannelManager'] to disk with the name "manager", returning an error if persistence failed.
+       /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
        fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
-               self.persist("manager", channel_manager)
+               self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
+                                  CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
+                                  CHANNEL_MANAGER_PERSISTENCE_KEY,
+                                  &channel_manager.encode())
        }
 
-       /// Persist the given [`NetworkGraph`] to disk with the name "network_graph", returning an error if persistence failed.
+       /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
        fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
-               self.persist("network_graph", network_graph)
+               self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
+                                  NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
+                                  NETWORK_GRAPH_PERSISTENCE_KEY,
+                                  &network_graph.encode())
        }
 
-       /// Persist the given [`WriteableScore`] to disk with name "scorer", returning an error if persistence failed.
+       /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
        fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
-               self.persist("scorer", &scorer)
+               self.write(SCORER_PERSISTENCE_NAMESPACE,
+                                  SCORER_PERSISTENCE_SUB_NAMESPACE,
+                                  SCORER_PERSISTENCE_KEY,
+                                  &scorer.encode())
        }
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStorePersister> Persist<ChannelSigner> for K {
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
        // TODO: We really need a way for the persister to inform the user that its time to crash/shut
        // down once these start returning failure.
        // A PermanentFailure implies we should probably just shut down the node since we're
        // force-closing channels without even broadcasting!
 
        fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               match self.persist(&key, monitor) {
+               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               match self.write(
+                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       &key, &monitor.encode())
+               {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
                        Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
                }
        }
 
        fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
-               match self.persist(&key, monitor) {
+               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               match self.write(
+                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       &key, &monitor.encode())
+               {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
                        Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
                }