f Reintroduce per-write tmp files
[rust-lightning] / lightning-persister / src / lib.rs
//! Provides utilities for LDK data persistence and retrieval.
//
// TODO: Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
#![deny(private_intra_doc_links)]

#![deny(missing_docs)]

#![cfg_attr(docsrs, feature(doc_auto_cfg))]

#[cfg(ldk_bench)] extern crate criterion;

pub mod fs_store;

#[cfg(test)]
mod test_utils;

mod util;

extern crate lightning;
extern crate bitcoin;
extern crate libc;

use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::hashes::hex::FromHex;
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::sign::{EntropySource, SignerProvider};
use lightning::util::ser::{ReadableArgs, Writeable};
use lightning::util::persist::KVStorePersister;
use std::fs;
use std::io::Cursor;
use std::ops::Deref;
use std::path::{Path, PathBuf};

/// FilesystemPersister persists channel data on disk, where each channel's
/// data is stored in a file named after its funding outpoint.
///
/// Warning: this module does the best it can with calls to persist data, but it
/// can only guarantee that the data is passed to the drive. It is up to the
/// drive manufacturers to do the actual persistence properly, which they often
/// don't (especially on consumer-grade hardware). Therefore, it is up to the
/// user to validate their entire storage stack, to ensure the writes are
/// persistent.
/// Corollary: especially when dealing with larger amounts of money, it is best
/// practice to have multiple channel data backups and not rely only on one
/// FilesystemPersister.
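///
/// A minimal construction sketch (the directory path below is a hypothetical
/// example, not a required location):
///
/// ```
/// use lightning_persister::FilesystemPersister;
///
/// // All channel data will be stored under this directory (hypothetical path).
/// let persister = FilesystemPersister::new("/tmp/ldk-data".to_string());
/// assert_eq!(persister.get_data_dir(), "/tmp/ldk-data");
/// ```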
pub struct FilesystemPersister {
	path_to_channel_data: String,
}

impl FilesystemPersister {
	/// Initialize a new FilesystemPersister and set the path to the individual channels'
	/// files.
	pub fn new(path_to_channel_data: String) -> Self {
		Self {
			path_to_channel_data,
		}
	}

	/// Get the directory which was provided when this persister was initialized.
	pub fn get_data_dir(&self) -> String {
		self.path_to_channel_data.clone()
	}

	/// Read `ChannelMonitor`s from disk.
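	///
	/// A minimal startup sketch, assuming a `keys_manager` (e.g. a reference to a
	/// `lightning::sign::KeysManager`) is available elsewhere; it is passed twice
	/// because it acts as both the `EntropySource` and the `SignerProvider`:
	///
	/// ```ignore
	/// let monitors = persister.read_channelmonitors(keys_manager, keys_manager)?;
	/// for (_best_block_hash, monitor) in monitors {
	///     // On restart, each deserialized monitor should be handed back to the
	///     // chain::Watch (e.g. the ChainMonitor) so it can keep watching the chain.
	/// }
	/// ```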
	pub fn read_channelmonitors<ES: Deref, SP: Deref> (
		&self, entropy_source: ES, signer_provider: SP
	) -> std::io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
		where
			ES::Target: EntropySource + Sized,
			SP::Target: SignerProvider + Sized
	{
		let mut path = PathBuf::from(&self.path_to_channel_data);
		path.push("monitors");
		if !Path::new(&path).exists() {
			return Ok(Vec::new());
		}
		let mut res = Vec::new();
		for file_option in fs::read_dir(path)? {
			let file = file_option.unwrap();
			let owned_file_name = file.file_name();
			let filename = owned_file_name.to_str()
				.ok_or_else(|| std::io::Error::new(std::io::ErrorKind::InvalidData,
					"File name is not a valid utf8 string"))?;
			if !filename.is_ascii() || filename.len() < 65 {
				return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid ChannelMonitor file name",
				));
			}
			if filename.ends_with(".tmp") {
				// If we were in the middle of committing a new update and crashed, it should be
				// safe to ignore the update - we should never have returned to the caller and
				// irrevocably committed to the new state in any way.
				continue;
			}

			let txid: Txid = Txid::from_hex(filename.split_at(64).0)
				.map_err(|_| std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid tx ID in filename",
				))?;

			let index: u16 = filename.split_at(65).1.parse()
				.map_err(|_| std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid tx index in filename",
				))?;

			let contents = fs::read(&file.path())?;
			let mut buffer = Cursor::new(&contents);
			match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(&mut buffer, (&*entropy_source, &*signer_provider)) {
				Ok((blockhash, channel_monitor)) => {
					if channel_monitor.get_funding_txo().0.txid != txid || channel_monitor.get_funding_txo().0.index != index {
						return Err(std::io::Error::new(std::io::ErrorKind::InvalidData,
							"ChannelMonitor was stored in the wrong file"));
					}
					res.push((blockhash, channel_monitor));
				}
				Err(e) => return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					format!("Failed to deserialize ChannelMonitor: {}", e),
				))
			}
		}
		Ok(res)
	}
}

impl KVStorePersister for FilesystemPersister {
	fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
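		// `key` is treated as a path relative to the data directory; ChannelMonitor
		// keys, for example, follow the `monitors/<funding_txid>_<index>` layout that
		// `read_channelmonitors` above expects. Per this change, `util::write_to_file`
		// stages each write in a `.tmp` file and renames it into place, which is why
		// the read path skips leftover `.tmp` files after a crash.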
		let mut dest_file = PathBuf::from(self.path_to_channel_data.clone());
		dest_file.push(key);
		util::write_to_file(dest_file, object)
	}
}

#[cfg(test)]
mod tests {
	extern crate lightning;
	extern crate bitcoin;
	use crate::FilesystemPersister;
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::Txid;
	use lightning::chain::ChannelMonitorUpdateStatus;
	use lightning::chain::chainmonitor::Persist;
	use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
	use lightning::chain::transaction::OutPoint;
	use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
	use lightning::events::{ClosureReason, MessageSendEventsProvider};
	use lightning::ln::functional_test_utils::*;
	use lightning::util::test_utils;
	use std::fs;
	#[cfg(target_os = "windows")]
	use {
		lightning::get_event_msg,
		lightning::ln::msgs::ChannelMessageHandler,
	};

	impl Drop for FilesystemPersister {
		fn drop(&mut self) {
			// We test for invalid directory names, so it's OK if directory removal
			// fails.
			match fs::remove_dir_all(&self.path_to_channel_data) {
				Err(e) => println!("Failed to remove test persister directory: {}", e),
				_ => {}
			}
		}
	}

	#[test]
	fn test_if_monitors_is_not_dir() {
		let persister = FilesystemPersister::new("test_monitors_is_not_dir".to_string());

		fs::create_dir_all(&persister.path_to_channel_data).unwrap();
		let mut path = std::path::PathBuf::from(&persister.path_to_channel_data);
		path.push("monitors");
		fs::File::create(path).unwrap();

		let chanmon_cfgs = create_chanmon_cfgs(1);
		let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister, node_cfgs[0].keys_manager);
		node_cfgs[0].chain_monitor = chain_mon_0;
		let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
		let nodes = create_network(1, &node_cfgs, &node_chanmgrs);

		// Check that read_channelmonitors() returns an error if monitors/ is not a
		// directory.
		assert!(persister.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).is_err());
	}

	// Integration-test the FilesystemPersister. Test relaying a few payments
	// and check that the persisted data is updated the appropriate number of
	// times.
	#[test]
	fn test_filesystem_persister() {
		// Create the nodes, giving them FilesystemPersisters for data persisters.
		let persister_0 = FilesystemPersister::new("test_filesystem_persister_0".to_string());
		let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, node_cfgs[0].keys_manager);
		let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, node_cfgs[1].keys_manager);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
		assert_eq!(persisted_chan_data_0.len(), 0);
		let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
		assert_eq!(persisted_chan_data_1.len(), 0);

		// Helper to make sure the channel is on the expected update ID.
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
				assert_eq!(persisted_chan_data_0.len(), 1);
				for (_, mon) in persisted_chan_data_0.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
				}
				persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
				assert_eq!(persisted_chan_data_1.len(), 1);
				for (_, mon) in persisted_chan_data_1.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
				}
			}
		}

		// Create an initial channel and check that it was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
		send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
		check_persisted_data!(5);
		send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
		check_persisted_data!(10);

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);

		connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
		check_closed_broadcast!(nodes[1], true);
		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
	}

	// Test that if the persister's path to channel data is read-only, writing a
	// monitor to it results in the persister returning a PermanentFailure.
	// Windows ignores the read-only flag for folders, so this test is Unix-only.
	#[cfg(not(target_os = "windows"))]
	#[test]
	fn test_readonly_dir_perm_failure() {
		let persister = FilesystemPersister::new("test_readonly_dir_perm_failure".to_string());
		fs::create_dir_all(&persister.path_to_channel_data).unwrap();

		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

		// Set the persister's directory to read-only, which should result in
		// returning a permanent failure when we then attempt to persist a
		// channel update.
		let path = &persister.path_to_channel_data;
		let mut perms = fs::metadata(path).unwrap().permissions();
		perms.set_readonly(true);
		fs::set_permissions(path, perms).unwrap();

		let test_txo = OutPoint {
			txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
			index: 0
		};
		match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
			ChannelMonitorUpdateStatus::PermanentFailure => {},
			_ => panic!("unexpected result from persisting new channel")
		}

		nodes[1].node.get_and_clear_pending_msg_events();
		added_monitors.clear();
	}

	// Test that if a persister's directory name is invalid, monitor persistence
	// will fail.
	#[cfg(target_os = "windows")]
	#[test]
	fn test_fail_on_open() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

		// Create the persister with an invalid directory name and test that the
		// channel fails to open because the directories fail to be created. There
		// don't seem to be invalid filename characters on Unix that Rust doesn't
		// handle, which is why the test is Windows-only.
		let persister = FilesystemPersister::new(":<>/".to_string());

		let test_txo = OutPoint {
			txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
			index: 0
		};
		match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
			ChannelMonitorUpdateStatus::PermanentFailure => {},
			_ => panic!("unexpected result from persisting new channel")
		}

		nodes[1].node.get_and_clear_pending_msg_events();
		added_monitors.clear();
	}
}

#[cfg(ldk_bench)]
/// Benches
pub mod bench {
	use criterion::Criterion;

	/// Bench!
	pub fn bench_sends(bench: &mut Criterion) {
		let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
		let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
		lightning::ln::channelmanager::bench::bench_two_sends(
			bench, "bench_filesystem_persisted_sends", persister_a, persister_b);
	}
}