Drop height parameter from [dis]connect_block in functional tests
[rust-lightning] / lightning-persister / src / lib.rs
1 //! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.
2
3 #![deny(broken_intra_doc_links)]
4 #![deny(missing_docs)]
5
6 mod util;
7
8 extern crate lightning;
9 extern crate bitcoin;
10 extern crate libc;
11
12 use bitcoin::hashes::hex::ToHex;
13 use crate::util::DiskWriteable;
14 use lightning::chain;
15 use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
16 use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr};
17 use lightning::chain::channelmonitor;
18 use lightning::chain::keysinterface::{Sign, KeysInterface};
19 use lightning::chain::transaction::OutPoint;
20 use lightning::ln::channelmanager::ChannelManager;
21 use lightning::util::logger::Logger;
22 use lightning::util::ser::Writeable;
23 use std::fs;
24 use std::io::Error;
25 use std::path::PathBuf;
26 use std::sync::Arc;
27
28 #[cfg(test)]
29 use {
30         lightning::util::ser::ReadableArgs,
31         bitcoin::{BlockHash, Txid},
32         bitcoin::hashes::hex::FromHex,
33         std::collections::HashMap,
34         std::io::Cursor
35 };
36
/// FilesystemPersister persists channel data on disk, where each channel's
/// data is stored in a file named after its funding outpoint.
///
/// Warning: this module does the best it can with calls to persist data, but it
/// can only guarantee that the data is passed to the drive. It is up to the
/// drive manufacturers to do the actual persistence properly, which they often
/// don't (especially on consumer-grade hardware). Therefore, it is up to the
/// user to validate their entire storage stack, to ensure the writes are
/// persistent.
/// Corollary: especially when dealing with larger amounts of money, it is best
/// practice to have multiple channel data backups and not rely only on one
/// FilesystemPersister.
pub struct FilesystemPersister {
	// Directory under which all channel data is stored; monitor files live in a
	// "monitors" subdirectory (see `path_to_monitor_data`).
	path_to_channel_data: String,
}
52
53 impl<Signer: Sign> DiskWriteable for ChannelMonitor<Signer> {
54         fn write_to_file(&self, writer: &mut fs::File) -> Result<(), Error> {
55                 self.write(writer)
56         }
57 }
58
59 impl<Signer: Sign, M, T, K, F, L> DiskWriteable for ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>
60 where M: chain::Watch<Signer>,
61       T: BroadcasterInterface,
62       K: KeysInterface<Signer=Signer>,
63       F: FeeEstimator,
64       L: Logger,
65 {
66         fn write_to_file(&self, writer: &mut fs::File) -> Result<(), std::io::Error> {
67                 self.write(writer)
68         }
69 }
70
71 impl FilesystemPersister {
72         /// Initialize a new FilesystemPersister and set the path to the individual channels'
73         /// files.
74         pub fn new(path_to_channel_data: String) -> Self {
75                 return Self {
76                         path_to_channel_data,
77                 }
78         }
79
80         /// Get the directory which was provided when this persister was initialized.
81         pub fn get_data_dir(&self) -> String {
82                 self.path_to_channel_data.clone()
83         }
84
85         pub(crate) fn path_to_monitor_data(&self) -> PathBuf {
86                 let mut path = PathBuf::from(self.path_to_channel_data.clone());
87                 path.push("monitors");
88                 path
89         }
90
91         /// Writes the provided `ChannelManager` to the path provided at `FilesystemPersister`
92         /// initialization, within a file called "manager".
93         pub fn persist_manager<Signer, M, T, K, F, L>(
94                 data_dir: String,
95                 manager: &ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>
96         ) -> Result<(), std::io::Error>
97         where Signer: Sign,
98               M: chain::Watch<Signer>,
99               T: BroadcasterInterface,
100               K: KeysInterface<Signer=Signer>,
101               F: FeeEstimator,
102               L: Logger
103         {
104                 let path = PathBuf::from(data_dir);
105                 util::write_to_file(path, "manager".to_string(), manager)
106         }
107
108         #[cfg(test)]
109         fn load_channel_data<Keys: KeysInterface>(&self, keys: &Keys) ->
110                 Result<HashMap<OutPoint, ChannelMonitor<Keys::Signer>>, ChannelMonitorUpdateErr> {
111                         if let Err(_) = fs::create_dir_all(self.path_to_monitor_data()) {
112                                 return Err(ChannelMonitorUpdateErr::PermanentFailure);
113                         }
114                         let mut res = HashMap::new();
115                         for file_option in fs::read_dir(self.path_to_monitor_data()).unwrap() {
116                                 let file = file_option.unwrap();
117                                 let owned_file_name = file.file_name();
118                                 let filename = owned_file_name.to_str();
119                                 if !filename.is_some() || !filename.unwrap().is_ascii() || filename.unwrap().len() < 65 {
120                                         return Err(ChannelMonitorUpdateErr::PermanentFailure);
121                                 }
122
123                                 let txid = Txid::from_hex(filename.unwrap().split_at(64).0);
124                                 if txid.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); }
125
126                                 let index = filename.unwrap().split_at(65).1.split('.').next().unwrap().parse();
127                                 if index.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); }
128
129                                 let contents = fs::read(&file.path());
130                                 if contents.is_err() { return Err(ChannelMonitorUpdateErr::PermanentFailure); }
131
132                                 if let Ok((_, loaded_monitor)) =
133                                         <(BlockHash, ChannelMonitor<Keys::Signer>)>::read(&mut Cursor::new(&contents.unwrap()), keys) {
134                                                 res.insert(OutPoint { txid: txid.unwrap(), index: index.unwrap() }, loaded_monitor);
135                                         } else {
136                                                 return Err(ChannelMonitorUpdateErr::PermanentFailure);
137                                         }
138                         }
139                         Ok(res)
140                 }
141 }
142
143 impl<ChannelSigner: Sign + Send + Sync> channelmonitor::Persist<ChannelSigner> for FilesystemPersister {
144         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
145                 let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
146                 util::write_to_file(self.path_to_monitor_data(), filename, monitor)
147                   .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
148         }
149
150         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
151                 let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
152                 util::write_to_file(self.path_to_monitor_data(), filename, monitor)
153                   .map_err(|_| ChannelMonitorUpdateErr::PermanentFailure)
154         }
155 }
156
#[cfg(test)]
mod tests {
	extern crate lightning;
	extern crate bitcoin;
	use crate::FilesystemPersister;
	use bitcoin::blockdata::block::{Block, BlockHeader};
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::Txid;
	use lightning::chain::channelmonitor::{Persist, ChannelMonitorUpdateErr};
	use lightning::chain::transaction::OutPoint;
	use lightning::{check_closed_broadcast, check_added_monitors};
	use lightning::ln::features::InitFeatures;
	use lightning::ln::functional_test_utils::*;
	use lightning::ln::msgs::ErrorAction;
	use lightning::util::events::{MessageSendEventsProvider, MessageSendEvent};
	use lightning::util::test_utils;
	use std::fs;
	#[cfg(target_os = "windows")]
	use {
		lightning::get_event_msg,
		lightning::ln::msgs::ChannelMessageHandler,
	};

	// Clean up each test's on-disk data directory when its persister goes out
	// of scope, so test runs don't leave stray directories behind.
	impl Drop for FilesystemPersister {
		fn drop(&mut self) {
			// We test for invalid directory names, so it's OK if directory removal
			// fails.
			match fs::remove_dir_all(&self.path_to_channel_data) {
				Err(e) => println!("Failed to remove test persister directory: {}", e),
				_ => {}
			}
		}
	}

	// Integration-test the FilesystemPersister. Test relaying a few payments
	// and check that the persisted data is updated the appropriate number of
	// times.
	#[test]
	fn test_filesystem_persister() {
		// Create the nodes, giving them FilesystemPersisters for data persisters.
		let persister_0 = FilesystemPersister::new("test_filesystem_persister_0".to_string());
		let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, &node_cfgs[0].keys_manager);
		let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, &node_cfgs[1].keys_manager);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.load_channel_data(nodes[0].keys_manager).unwrap();
		assert_eq!(persisted_chan_data_0.keys().len(), 0);
		let mut persisted_chan_data_1 = persister_1.load_channel_data(nodes[1].keys_manager).unwrap();
		assert_eq!(persisted_chan_data_1.keys().len(), 0);

		// Helper to make sure the channel is on the expected update ID.
		// Reloads both nodes' monitors from disk and asserts each side's single
		// persisted monitor sits at $expected_update_id.
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.load_channel_data(nodes[0].keys_manager).unwrap();
				assert_eq!(persisted_chan_data_0.keys().len(), 1);
				for mon in persisted_chan_data_0.values() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
				}
				persisted_chan_data_1 = persister_1.load_channel_data(nodes[1].keys_manager).unwrap();
				assert_eq!(persisted_chan_data_1.keys().len(), 1);
				for mon in persisted_chan_data_1.values() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
				}
			}
		}

		// Create some initial channel and check that a channel was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
		// NOTE(review): the expected IDs (5, 10) encode how many monitor updates a
		// single full payment round-trip produces per side — confirm against the
		// lightning crate's monitor-update counting if these assertions drift.
		send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
		check_persisted_data!(5);
		send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000, 4_000_000);
		check_persisted_data!(10);

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
		check_closed_broadcast!(nodes[0], false);
		check_added_monitors!(nodes[0], 1);

		let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
		assert_eq!(node_txn.len(), 1);

		// Confirm node 0's commitment transaction on node 1's chain so node 1
		// also closes and persists a final update.
		// NOTE(review): the broadcast tx appears twice in txdata — presumably
		// intentional (duplicate confirmation must be handled); verify upstream.
		let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
		connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[0].clone()]});
		check_closed_broadcast!(nodes[1], false);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(11);
	}

	// Test that if the persister's path to channel data is read-only, writing a
	// monitor to it results in the persister returning a PermanentFailure.
	// Windows ignores the read-only flag for folders, so this test is Unix-only.
	#[cfg(not(target_os = "windows"))]
	#[test]
	fn test_readonly_dir_perm_failure() {
		let persister = FilesystemPersister::new("test_readonly_dir_perm_failure".to_string());
		fs::create_dir_all(&persister.path_to_channel_data).unwrap();

		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		nodes[1].node.force_close_channel(&chan.2).unwrap();
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();

		// Set the persister's directory to read-only, which should result in
		// returning a permanent failure when we then attempt to persist a
		// channel update.
		let path = &persister.path_to_channel_data;
		let mut perms = fs::metadata(path).unwrap().permissions();
		perms.set_readonly(true);
		fs::set_permissions(path, perms).unwrap();

		let test_txo = OutPoint {
			txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
			index: 0
		};
		match persister.persist_new_channel(test_txo, &added_monitors[0].1) {
			Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
			_ => panic!("unexpected result from persisting new channel")
		}

		// Drain pending events/monitors so the test harness's end-of-test
		// checks don't trip over the forced close.
		nodes[1].node.get_and_clear_pending_msg_events();
		added_monitors.clear();
	}

	// Test that if a persister's directory name is invalid, monitor persistence
	// will fail.
	#[cfg(target_os = "windows")]
	#[test]
	fn test_fail_on_open() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
		nodes[1].node.force_close_channel(&chan.2).unwrap();
		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();

		// Create the persister with an invalid directory name and test that the
		// channel fails to open because the directories fail to be created. There
		// don't seem to be invalid filename characters on Unix that Rust doesn't
		// handle, hence why the test is Windows-only.
		let persister = FilesystemPersister::new(":<>/".to_string());

		let test_txo = OutPoint {
			txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
			index: 0
		};
		match persister.persist_new_channel(test_txo, &added_monitors[0].1) {
			Err(ChannelMonitorUpdateErr::PermanentFailure) => {},
			_ => panic!("unexpected result from persisting new channel")
		}

		// Drain pending events/monitors so the test harness's end-of-test
		// checks don't trip over the forced close.
		nodes[1].node.get_and_clear_pending_msg_events();
		added_monitors.clear();
	}
}
332 }