// rust-lightning: lightning-persister/src/lib.rs
//! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.

#![deny(broken_intra_doc_links)]
#![deny(missing_docs)]

#![cfg_attr(all(test, feature = "_bench_unstable"), feature(test))]
#[cfg(all(test, feature = "_bench_unstable"))] extern crate test;

mod util;

extern crate lightning;
extern crate bitcoin;
extern crate libc;

use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::hashes::hex::{FromHex, ToHex};
use crate::util::DiskWriteable;
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::chainmonitor;
use lightning::chain::keysinterface::{Sign, KeysInterface};
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmanager::ChannelManager;
use lightning::util::logger::Logger;
use lightning::util::ser::{ReadableArgs, Writeable};
use std::fs;
use std::io::{Cursor, Error};
use std::ops::Deref;
use std::path::{Path, PathBuf};

/// FilesystemPersister persists channel data on disk, where each channel's
/// data is stored in a file named after its funding outpoint.
///
/// Warning: this module does the best it can with calls to persist data, but it
/// can only guarantee that the data is passed to the drive. It is up to the
/// drive manufacturers to do the actual persistence properly, which they often
/// don't (especially on consumer-grade hardware). Therefore, it is up to the
/// user to validate their entire storage stack, to ensure the writes are
/// persistent.
/// Corollary: especially when dealing with larger amounts of money, it is best
/// practice to have multiple channel data backups and not rely only on one
/// FilesystemPersister.
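///
/// # Example
///
/// A minimal usage sketch (an illustrative addition, not part of the upstream docs). The seed,
/// timestamps and data directory below are placeholders, and the exact `KeysManager` constructor
/// arguments are assumed from typical usage:
///
/// ```no_run
/// use lightning::chain::keysinterface::KeysManager;
/// use lightning_persister::FilesystemPersister;
///
/// // Placeholder entropy and start time; a real node derives these securely.
/// let seed = [42u8; 32];
/// let keys_manager = KeysManager::new(&seed, 42, 42);
///
/// // Point the persister at a data directory; monitors are written to "<dir>/monitors".
/// let persister = FilesystemPersister::new("/tmp/ldk-data".to_string());
///
/// // On restart, reload any ChannelMonitors that were previously persisted.
/// let monitors = persister.read_channelmonitors(&keys_manager).unwrap();
/// println!("restored {} monitors from {}", monitors.len(), persister.get_data_dir());
/// ```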
pub struct FilesystemPersister {
	path_to_channel_data: String,
}

impl<Signer: Sign> DiskWriteable for ChannelMonitor<Signer> {
	fn write_to_file(&self, writer: &mut fs::File) -> Result<(), Error> {
		self.write(writer)
	}
}

impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> DiskWriteable for ChannelManager<Signer, M, T, K, F, L>
where
	M::Target: chain::Watch<Signer>,
	T::Target: BroadcasterInterface,
	K::Target: KeysInterface<Signer=Signer>,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	fn write_to_file(&self, writer: &mut fs::File) -> Result<(), std::io::Error> {
		self.write(writer)
	}
}

impl FilesystemPersister {
	/// Initialize a new FilesystemPersister and set the path to the individual channels'
	/// files.
	pub fn new(path_to_channel_data: String) -> Self {
		Self { path_to_channel_data }
	}

	/// Get the directory which was provided when this persister was initialized.
	pub fn get_data_dir(&self) -> String {
		self.path_to_channel_data.clone()
	}

	pub(crate) fn path_to_monitor_data(&self) -> PathBuf {
		let mut path = PathBuf::from(self.path_to_channel_data.clone());
		path.push("monitors");
		path
	}

	/// Writes the provided `ChannelManager` to a file named "manager" inside the given
	/// `data_dir` directory.
	pub fn persist_manager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
		data_dir: String,
		manager: &ChannelManager<Signer, M, T, K, F, L>
	) -> Result<(), std::io::Error>
	where
		M::Target: chain::Watch<Signer>,
		T::Target: BroadcasterInterface,
		K::Target: KeysInterface<Signer=Signer>,
		F::Target: FeeEstimator,
		L::Target: Logger,
	{
		let path = PathBuf::from(data_dir);
		util::write_to_file(path, "manager".to_string(), manager)
	}
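
	// Note (an illustrative sketch, not part of the upstream docs): callers typically invoke
	// this whenever the `ChannelManager` needs re-persisting, e.g. from a background task:
	//
	//     FilesystemPersister::persist_manager(persister.get_data_dir(), &channel_manager)?;
	//
	// where `persister` is a `FilesystemPersister` and `channel_manager` is the node's
	// concrete `ChannelManager`.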

	/// Read `ChannelMonitor`s from disk.
	///
	/// Monitors are expected to live in the "monitors" subdirectory, in files named
	/// `<funding_txid>_<funding_output_index>`; stale `.tmp` files left over from an
	/// interrupted write are skipped.
	pub fn read_channelmonitors<Signer: Sign, K: Deref>(
		&self, keys_manager: K
	) -> Result<Vec<(BlockHash, ChannelMonitor<Signer>)>, std::io::Error>
		where K::Target: KeysInterface<Signer=Signer> + Sized,
	{
		let path = self.path_to_monitor_data();
		if !Path::new(&path).exists() {
			return Ok(Vec::new());
		}
		let mut res = Vec::new();
		for file_option in fs::read_dir(path).unwrap() {
			let file = file_option.unwrap();
			let owned_file_name = file.file_name();
			let filename = owned_file_name.to_str();
			if filename.is_none() || !filename.unwrap().is_ascii() || filename.unwrap().len() < 65 {
				return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid ChannelMonitor file name",
				));
			}
			if filename.unwrap().ends_with(".tmp") {
				// If we were in the middle of committing a new update and crashed, it should be
				// safe to ignore the update - we should never have returned to the caller and
				// irrevocably committed to the new state in any way.
				continue;
			}

			let txid = Txid::from_hex(filename.unwrap().split_at(64).0);
			if txid.is_err() {
				return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid tx ID in filename",
				));
			}

			let index = filename.unwrap().split_at(65).1.parse();
			if index.is_err() {
				return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid tx index in filename",
				));
			}

			let contents = fs::read(&file.path())?;
			let mut buffer = Cursor::new(&contents);
			match <(BlockHash, ChannelMonitor<Signer>)>::read(&mut buffer, &*keys_manager) {
				Ok((blockhash, channel_monitor)) => {
					if channel_monitor.get_funding_txo().0.txid != txid.unwrap() || channel_monitor.get_funding_txo().0.index != index.unwrap() {
						return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "ChannelMonitor was stored in the wrong file"));
					}
					res.push((blockhash, channel_monitor));
				}
				Err(e) => return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					format!("Failed to deserialize ChannelMonitor: {}", e),
				))
			}
		}
		Ok(res)
	}
}

impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for FilesystemPersister {
	// TODO: We really need a way for the persister to inform the user that it's time to crash/shut
	// down once these start returning failure.
	// A PermanentFailure implies we need to shut down since we're force-closing channels without
	// even broadcasting!

	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
		let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
		util::write_to_file(self.path_to_monitor_data(), filename, monitor)
			.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
	}

	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
		let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
		util::write_to_file(self.path_to_monitor_data(), filename, monitor)
			.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
	}
}
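
// Wiring sketch (an illustrative addition, not part of the upstream code): a `FilesystemPersister`
// is typically handed to `chainmonitor::ChainMonitor::new` as its `Persist` implementation, so
// every monitor write flows through `persist_new_channel`/`update_persisted_channel` above. The
// component names here are placeholders for whatever the node already has in scope:
//
//     let chain_monitor = chainmonitor::ChainMonitor::new(
//         Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister);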

#[cfg(test)]
mod tests {
	extern crate lightning;
	extern crate bitcoin;
	use crate::FilesystemPersister;
	use bitcoin::blockdata::block::{Block, BlockHeader};
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::Txid;
	use lightning::chain::ChannelMonitorUpdateErr;
	use lightning::chain::chainmonitor::Persist;
	use lightning::chain::transaction::OutPoint;
	use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
	use lightning::ln::features::InitFeatures;
	use lightning::ln::functional_test_utils::*;
	use lightning::util::events::{ClosureReason, MessageSendEventsProvider};
	use lightning::util::test_utils;
	use std::fs;
	#[cfg(target_os = "windows")]
	use {
		lightning::get_event_msg,
		lightning::ln::msgs::ChannelMessageHandler,
	};

	impl Drop for FilesystemPersister {
		fn drop(&mut self) {
			// We test for invalid directory names, so it's OK if directory removal
			// fails.
			match fs::remove_dir_all(&self.path_to_channel_data) {
				Err(e) => println!("Failed to remove test persister directory: {}", e),
				_ => {}
			}
		}
	}

	// Integration-test the FilesystemPersister. Relay a few payments and check that
	// the persisted data is updated the appropriate number of times.
	#[test]
	fn test_filesystem_persister() {
		// Create the nodes, giving them FilesystemPersisters for data persisters.
		let persister_0 = FilesystemPersister::new("test_filesystem_persister_0".to_string());
		let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, &node_cfgs[0].keys_manager);
		let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, &node_cfgs[1].keys_manager);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
		assert_eq!(persisted_chan_data_0.len(), 0);
		let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
		assert_eq!(persisted_chan_data_1.len(), 0);

		// Helper to make sure the channel is on the expected update ID.
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
				assert_eq!(persisted_chan_data_0.len(), 1);
				for (_, mon) in persisted_chan_data_0.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
				}
				persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
				assert_eq!(persisted_chan_data_1.len(), 1);
				for (_, mon) in persisted_chan_data_1.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
				}
			}
		}

		// Create some initial channel and check that a channel was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
		send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
		check_persisted_data!(5);
		send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
		check_persisted_data!(10);

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);

		let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
		check_closed_broadcast!(nodes[1], true);
		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(11);
	}

	// Test that if the persister's path to channel data is read-only, writing a
	// monitor to it results in the persister returning a PermanentFailure.
	// Windows ignores the read-only flag for folders, so this test is Unix-only.
	#[cfg(not(target_os = "windows"))]
	#[test]
	fn test_readonly_dir_perm_failure() {
		let persister = FilesystemPersister::new("test_readonly_dir_perm_failure".to_string());
		fs::create_dir_all(&persister.path_to_channel_data).unwrap();

		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		nodes[1].node.force_close_channel(&chan.2).unwrap();
		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

		// Set the persister's directory to read-only, which should result in
		// returning a permanent failure when we then attempt to persist a
		// channel update.
		let path = &persister.path_to_channel_data;
		let mut perms = fs::metadata(path).unwrap().permissions();
		perms.set_readonly(true);
		fs::set_permissions(path, perms).unwrap();

		let test_txo = OutPoint {
			txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
			index: 0
		};
		match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
			Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
			_ => panic!("unexpected result from persisting new channel")
		}

		nodes[1].node.get_and_clear_pending_msg_events();
		added_monitors.clear();
	}

	// Test that if a persister's directory name is invalid, monitor persistence
	// will fail.
	#[cfg(target_os = "windows")]
	#[test]
	fn test_fail_on_open() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		nodes[1].node.force_close_channel(&chan.2).unwrap();
		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

		// Create the persister with an invalid directory name and test that monitor
		// persistence fails because the directories fail to be created. There don't
		// seem to be invalid filename characters on Unix that Rust doesn't handle,
		// which is why this test is Windows-only.
		let persister = FilesystemPersister::new(":<>/".to_string());

		let test_txo = OutPoint {
			txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
			index: 0
		};
		match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
			Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
			_ => panic!("unexpected result from persisting new channel")
		}

		nodes[1].node.get_and_clear_pending_msg_events();
		added_monitors.clear();
	}
}

#[cfg(all(test, feature = "_bench_unstable"))]
pub mod bench {
	use test::Bencher;

	#[bench]
	fn bench_sends(bench: &mut Bencher) {
		let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
		let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
		lightning::ln::channelmanager::bench::bench_two_sends(bench, persister_a, persister_b);
	}
}