//! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.

#![deny(broken_intra_doc_links)]
#![deny(missing_docs)]

#![cfg_attr(docsrs, feature(doc_auto_cfg))]

#![cfg_attr(all(test, feature = "_bench_unstable"), feature(test))]
#[cfg(all(test, feature = "_bench_unstable"))] extern crate test;

mod util;

extern crate lightning;
extern crate bitcoin;
extern crate libc;

use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::hashes::hex::{FromHex, ToHex};
use crate::util::DiskWriteable;
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::chainmonitor;
use lightning::chain::keysinterface::{Sign, KeysInterface};
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmanager::ChannelManager;
use lightning::util::logger::Logger;
use lightning::util::ser::{ReadableArgs, Writeable};
use std::fs;
use std::io::{Cursor, Error};
use std::ops::Deref;
use std::path::{Path, PathBuf};
/// FilesystemPersister persists channel data on disk, where each channel's
/// data is stored in a file named after its funding outpoint.
///
/// Warning: this module does the best it can with calls to persist data, but it
/// can only guarantee that the data is passed to the drive. It is up to the
/// drive manufacturers to do the actual persistence properly, which they often
/// don't (especially on consumer-grade hardware). Therefore, it is up to the
/// user to validate their entire storage stack, to ensure the writes are
/// persistent.
///
/// Corollary: especially when dealing with larger amounts of money, it is best
/// practice to have multiple channel data backups and not rely only on one
/// FilesystemPersister.
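///
/// A minimal usage sketch (the data directory and `keys_manager` below are illustrative
/// assumptions, not values provided by this crate):
///
/// ```ignore
/// use lightning_persister::FilesystemPersister;
///
/// // Channel monitors will be written under "<data dir>/monitors", one file per channel.
/// let persister = FilesystemPersister::new("/path/to/ldk-data".to_string());
///
/// // On restart, reload every ChannelMonitor that was previously persisted.
/// let monitors = persister.read_channelmonitors(&keys_manager)?;
/// ```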
pub struct FilesystemPersister {
	path_to_channel_data: String,
}

impl<Signer: Sign> DiskWriteable for ChannelMonitor<Signer> {
	fn write_to_file(&self, writer: &mut fs::File) -> Result<(), Error> {
		self.write(writer)
	}
}

impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> DiskWriteable for ChannelManager<Signer, M, T, K, F, L>
where
	M::Target: chain::Watch<Signer>,
	T::Target: BroadcasterInterface,
	K::Target: KeysInterface<Signer=Signer>,
	F::Target: FeeEstimator,
	L::Target: Logger,
{
	fn write_to_file(&self, writer: &mut fs::File) -> Result<(), std::io::Error> {
		self.write(writer)
	}
}

impl FilesystemPersister {
	/// Initialize a new FilesystemPersister and set the path to the individual channels'
	/// files.
	pub fn new(path_to_channel_data: String) -> Self {
		Self { path_to_channel_data }
	}

	/// Get the directory which was provided when this persister was initialized.
	pub fn get_data_dir(&self) -> String {
		self.path_to_channel_data.clone()
	}

	pub(crate) fn path_to_monitor_data(&self) -> PathBuf {
		let mut path = PathBuf::from(self.path_to_channel_data.clone());
		path.push("monitors");
		path
	}

	/// Writes the provided `ChannelManager` to the path provided at `FilesystemPersister`
	/// initialization, within a file called "manager".
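	///
	/// A sketch of intended use (`data_dir` and `channel_manager` here are illustrative
	/// assumptions):
	///
	/// ```ignore
	/// FilesystemPersister::persist_manager(data_dir.clone(), &channel_manager)?;
	/// ```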
	pub fn persist_manager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
		data_dir: String,
		manager: &ChannelManager<Signer, M, T, K, F, L>
	) -> Result<(), std::io::Error>
	where
		M::Target: chain::Watch<Signer>,
		T::Target: BroadcasterInterface,
		K::Target: KeysInterface<Signer=Signer>,
		F::Target: FeeEstimator,
		L::Target: Logger,
	{
		let path = PathBuf::from(data_dir);
		util::write_to_file(path, "manager".to_string(), manager)
	}

	/// Read `ChannelMonitor`s from disk.
	pub fn read_channelmonitors<Signer: Sign, K: Deref> (
		&self, keys_manager: K
	) -> Result<Vec<(BlockHash, ChannelMonitor<Signer>)>, std::io::Error>
		where K::Target: KeysInterface<Signer=Signer> + Sized,
	{
		let path = self.path_to_monitor_data();
		if !Path::new(&path).exists() {
			return Ok(Vec::new());
		}
		let mut res = Vec::new();
		for file_option in fs::read_dir(path).unwrap() {
			let file = file_option.unwrap();
			let owned_file_name = file.file_name();
			let filename = owned_file_name.to_str();
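			// Monitor files are named "<funding txid hex>_<funding output index>": 64 hex
			// characters, an underscore, then the index. Anything shorter than 65 ASCII
			// characters therefore cannot be a valid monitor file.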
			if !filename.is_some() || !filename.unwrap().is_ascii() || filename.unwrap().len() < 65 {
				return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid ChannelMonitor file name",
				));
			}
			if filename.unwrap().ends_with(".tmp") {
				// If we were in the middle of committing a new update and crashed, it should be
				// safe to ignore the update - we should never have returned to the caller and
				// irrevocably committed to the new state in any way.
				continue;
			}

			let txid = Txid::from_hex(filename.unwrap().split_at(64).0);
			if txid.is_err() {
				return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid tx ID in filename",
				));
			}

			let index = filename.unwrap().split_at(65).1.parse();
			if index.is_err() {
				return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					"Invalid tx index in filename",
				));
			}

			let contents = fs::read(&file.path())?;
			let mut buffer = Cursor::new(&contents);
			match <(BlockHash, ChannelMonitor<Signer>)>::read(&mut buffer, &*keys_manager) {
				Ok((blockhash, channel_monitor)) => {
					if channel_monitor.get_funding_txo().0.txid != txid.unwrap() || channel_monitor.get_funding_txo().0.index != index.unwrap() {
						return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "ChannelMonitor was stored in the wrong file"));
					}
					res.push((blockhash, channel_monitor));
				}
				Err(e) => return Err(std::io::Error::new(
					std::io::ErrorKind::InvalidData,
					format!("Failed to deserialize ChannelMonitor: {}", e),
				))
			}
		}
		Ok(res)
	}
}

impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for FilesystemPersister {
	// TODO: We really need a way for the persister to inform the user that it's time to crash/shut
	// down once these start returning failure.
	// A PermanentFailure implies we need to shut down since we're force-closing channels without
	// even broadcasting!

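	// Each monitor is written to "<data dir>/monitors/<funding txid>_<funding output index>",
	// matching the file naming that read_channelmonitors expects on startup.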
	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
		let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
		util::write_to_file(self.path_to_monitor_data(), filename, monitor)
			.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
	}

	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
		let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
		util::write_to_file(self.path_to_monitor_data(), filename, monitor)
			.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
	}
}

#[cfg(test)]
mod tests {
	extern crate lightning;
	extern crate bitcoin;
	use crate::FilesystemPersister;
	use bitcoin::blockdata::block::{Block, BlockHeader};
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::Txid;
	use lightning::chain::ChannelMonitorUpdateErr;
	use lightning::chain::chainmonitor::Persist;
	use lightning::chain::transaction::OutPoint;
	use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
	use lightning::ln::features::InitFeatures;
	use lightning::ln::functional_test_utils::*;
	use lightning::util::events::{ClosureReason, MessageSendEventsProvider};
	use lightning::util::test_utils;
	use std::fs;
	#[cfg(target_os = "windows")]
	use {
		lightning::get_event_msg,
		lightning::ln::msgs::ChannelMessageHandler,
	};

	impl Drop for FilesystemPersister {
		fn drop(&mut self) {
			// We test for invalid directory names, so it's OK if directory removal
			// fails.
			match fs::remove_dir_all(&self.path_to_channel_data) {
				Err(e) => println!("Failed to remove test persister directory: {}", e),
				_ => {}
			}
		}
	}

	// Integration-test the FilesystemPersister. Test relaying a few payments
	// and check that the persisted data is updated the appropriate number of
	// times.
	#[test]
	fn test_filesystem_persister() {
		// Create the nodes, giving them FilesystemPersisters for data persisters.
		let persister_0 = FilesystemPersister::new("test_filesystem_persister_0".to_string());
		let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, &node_cfgs[0].keys_manager);
		let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, &node_cfgs[1].keys_manager);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
		assert_eq!(persisted_chan_data_0.len(), 0);
		let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
		assert_eq!(persisted_chan_data_1.len(), 0);

		// Helper to make sure the channel is on the expected update ID.
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager).unwrap();
				assert_eq!(persisted_chan_data_0.len(), 1);
				for (_, mon) in persisted_chan_data_0.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
				}
				persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager).unwrap();
				assert_eq!(persisted_chan_data_1.len(), 1);
				for (_, mon) in persisted_chan_data_1.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
				}
			}
		}

		// Create an initial channel and check that a channel was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
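		// (Each send_payment round trip below, routing the payment and then claiming it,
		// advances each side's monitor update ID by 5, as the checks assert.)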
		send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
		check_persisted_data!(5);
		send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
		check_persisted_data!(10);

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);

		let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
		check_closed_broadcast!(nodes[1], true);
		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(11);
	}

	// Test that if the persister's path to channel data is read-only, writing a
	// monitor to it results in the persister returning a PermanentFailure.
	// Windows ignores the read-only flag for folders, so this test is Unix-only.
	#[cfg(not(target_os = "windows"))]
	#[test]
	fn test_readonly_dir_perm_failure() {
		let persister = FilesystemPersister::new("test_readonly_dir_perm_failure".to_string());
		fs::create_dir_all(&persister.path_to_channel_data).unwrap();

		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		nodes[1].node.force_close_channel(&chan.2).unwrap();
		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

		// Set the persister's directory to read-only, which should result in
		// returning a permanent failure when we then attempt to persist a
		// channel update.
		let path = &persister.path_to_channel_data;
		let mut perms = fs::metadata(path).unwrap().permissions();
		perms.set_readonly(true);
		fs::set_permissions(path, perms).unwrap();

		let test_txo = OutPoint {
			txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
			index: 0
		};
		match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
			Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
			_ => panic!("unexpected result from persisting new channel")
		}

		nodes[1].node.get_and_clear_pending_msg_events();
		added_monitors.clear();
	}

	// Test that if a persister's directory name is invalid, monitor persistence
	// will fail.
	#[cfg(target_os = "windows")]
	#[test]
	fn test_fail_on_open() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		nodes[1].node.force_close_channel(&chan.2).unwrap();
		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

		// Create the persister with an invalid directory name and test that
		// persisting the monitor fails because the directories fail to be created.
		// There don't seem to be invalid filename characters on Unix that Rust
		// doesn't handle, hence the test is Windows-only.
		let persister = FilesystemPersister::new(":<>/".to_string());

		let test_txo = OutPoint {
			txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
			index: 0
		};
		match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
			Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
			_ => panic!("unexpected result from persisting new channel")
		}

		nodes[1].node.get_and_clear_pending_msg_events();
		added_monitors.clear();
	}
}

#[cfg(all(test, feature = "_bench_unstable"))]
pub mod bench {
	use test::Bencher;

	#[bench]
	fn bench_sends(bench: &mut Bencher) {
		let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
		let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
		lightning::ln::channelmanager::bench::bench_two_sends(bench, persister_a, persister_b);
	}
}