1 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
2 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
3 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
4 // You may not use this file except in accordance with one or both of these
5 // licenses.
6
7 //! This module contains a simple key-value store trait [`KVStore`] that
8 //! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
9 //! and [`ChannelMonitor`] all in one place.
10
11 use core::cmp;
12 use core::convert::{TryFrom, TryInto};
13 use core::ops::Deref;
14 use core::str::FromStr;
15 use bitcoin::{BlockHash, Txid};
16
17 use crate::{io, log_error};
18 use crate::alloc::string::ToString;
19 use crate::prelude::*;
20
21 use crate::chain;
22 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
23 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
24 use crate::sign::{EntropySource, NodeSigner, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
25 use crate::chain::transaction::OutPoint;
26 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
27 use crate::ln::channelmanager::ChannelManager;
28 use crate::routing::router::Router;
29 use crate::routing::gossip::NetworkGraph;
30 use crate::routing::scoring::WriteableScore;
31 use crate::util::logger::Logger;
32 use crate::util::ser::{Readable, ReadableArgs, Writeable};
33
34 /// The alphabet of characters allowed for namespaces and keys.
35 pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
36
37 /// The maximum number of characters namespaces and keys may have.
38 pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
39
40 /// The primary namespace under which the [`ChannelManager`] will be persisted.
41 pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
42 /// The secondary namespace under which the [`ChannelManager`] will be persisted.
43 pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
44 /// The key under which the [`ChannelManager`] will be persisted.
45 pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
46
47 /// The primary namespace under which [`ChannelMonitor`]s will be persisted.
48 pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
49 /// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
50 pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
51 /// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
52 pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
53
54 /// The primary namespace under which the [`NetworkGraph`] will be persisted.
55 pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
56 /// The secondary namespace under which the [`NetworkGraph`] will be persisted.
57 pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
58 /// The key under which the [`NetworkGraph`] will be persisted.
59 pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
60
61 /// The primary namespace under which the [`WriteableScore`] will be persisted.
62 pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
63 /// The secondary namespace under which the [`WriteableScore`] will be persisted.
64 pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
65 /// The key under which the [`WriteableScore`] will be persisted.
66 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
67
68 /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
69 ///
70 /// This serves to prevent someone from accidentally loading such monitors (which may need
71 /// updates applied to be current) with another implementation.
72 pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
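
// --- Editorial example, not part of upstream LDK -------------------------------------
// A minimal sketch of the downgrade step described in the `MonitorUpdatingPersister`
// docs below: once every pending update has been applied to a monitor, the sentinel
// bytes can simply be stripped before handing the serialized monitor to a persister
// that does not understand them. The helper name is illustrative only.
#[allow(dead_code)]
fn strip_monitor_sentinel(stored_bytes: &[u8]) -> &[u8] {
        if stored_bytes.starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
                &stored_bytes[MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len()..]
        } else {
                stored_bytes
        }
}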
73
74 /// Provides an interface that allows storage and retrieval of persisted values that are associated
75 /// with given keys.
76 ///
77 /// In order to avoid collisions, the key space is segmented based on the given `primary_namespace`s
78 /// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
79 /// ways, as long as per-namespace key uniqueness is asserted.
80 ///
81 /// Keys and namespaces are required to be valid ASCII strings in the range of
82 /// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
83 /// primary namespaces and secondary namespaces (`""`) are assumed to be valid; however, if
84 /// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
85 /// that concerns should always be separated by primary namespace first, before secondary
86 /// namespaces are used. While the number of primary namespaces will be relatively small and is
87 /// determined at compile time, there may be many secondary namespaces per primary namespace. Note
88 /// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given
89 /// namespace, i.e., conflicts between keys and equally named
90 /// primary namespaces/secondary namespaces must be avoided.
91 ///
92 /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
93 /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
94 /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
95 pub trait KVStore {
96         /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
97         /// `key`.
98         ///
99         /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
100         /// `primary_namespace` and `secondary_namespace`.
101         ///
102         /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
103         fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
104         /// Persists the given data under the given `key`.
105         ///
106         /// Will create the given `primary_namespace` and `secondary_namespace` if not already present
107         /// in the store.
108         fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
109         /// Removes any data that had previously been persisted under the given `key`.
110         ///
111         /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
112         /// remove the given `key` at some point in time after the method returns, e.g., as part of an
113         /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
114         /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
115         ///
116         /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
117         /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
118         /// potentially get lost on crash after the method returns. Therefore, this flag should only be
119         /// set for `remove` operations that can be safely replayed at a later time.
120         ///
121         /// Returns successfully if no data will be stored for the given `primary_namespace`,
122         /// `secondary_namespace`, and `key`, independently of whether it was present before its
123         /// invocation or not.
124         fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
125         /// Returns a list of keys that are stored under the given `secondary_namespace` in
126         /// `primary_namespace`.
127         ///
128         /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
129         /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
130         fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
131 }
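
// --- Editorial example, not part of upstream LDK -------------------------------------
// A minimal sketch of an in-memory `KVStore`, assuming a `std` environment. It ignores
// durability entirely and only illustrates the namespace/key data model above; a real
// backend must make writes durable (and atomic) before returning `Ok(())`. All names in
// this block are illustrative.
#[cfg(all(test, feature = "std"))]
mod in_memory_store_example {
        use super::*;
        use std::collections::HashMap;
        use std::sync::Mutex;

        pub struct InMemoryStore {
                // Maps (primary_namespace, secondary_namespace, key) to the stored bytes.
                entries: Mutex<HashMap<(String, String, String), Vec<u8>>>,
        }

        impl InMemoryStore {
                pub fn new() -> Self {
                        Self { entries: Mutex::new(HashMap::new()) }
                }
        }

        impl KVStore for InMemoryStore {
                fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error> {
                        let entries = self.entries.lock().unwrap();
                        entries
                                .get(&(primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()))
                                .cloned()
                                // Returning `NotFound` here, and only for genuinely missing keys, is what
                                // lets `MonitorUpdatingPersister` detect the end of an update sequence.
                                .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
                }

                fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error> {
                        let mut entries = self.entries.lock().unwrap();
                        entries.insert(
                                (primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()),
                                buf.to_vec(),
                        );
                        Ok(())
                }

                fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool) -> Result<(), io::Error> {
                        // Succeeds whether or not the key was present, as the trait requires.
                        let mut entries = self.entries.lock().unwrap();
                        entries.remove(&(primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()));
                        Ok(())
                }

                fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error> {
                        let entries = self.entries.lock().unwrap();
                        Ok(entries
                                .keys()
                                .filter(|(p, s, _)| p == primary_namespace && s == secondary_namespace)
                                .map(|(_, _, k)| k.clone())
                                .collect())
                }
        }
}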
132
133 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
134 pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
135         where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
136                 T::Target: 'static + BroadcasterInterface,
137                 ES::Target: 'static + EntropySource,
138                 NS::Target: 'static + NodeSigner,
139                 SP::Target: 'static + SignerProvider,
140                 F::Target: 'static + FeeEstimator,
141                 R::Target: 'static + Router,
142                 L::Target: 'static + Logger,
143 {
144         /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
145         fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error>;
146
147         /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
148         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
149
150         /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
151         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
152 }
153
154
155 impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
156         where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
157                 T::Target: 'static + BroadcasterInterface,
158                 ES::Target: 'static + EntropySource,
159                 NS::Target: 'static + NodeSigner,
160                 SP::Target: 'static + SignerProvider,
161                 F::Target: 'static + FeeEstimator,
162                 R::Target: 'static + Router,
163                 L::Target: 'static + Logger,
164 {
165         fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
166                 self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
167                         CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
168                         CHANNEL_MANAGER_PERSISTENCE_KEY,
169                         &channel_manager.encode())
170         }
171
172         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
173                 self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
174                         NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
175                         NETWORK_GRAPH_PERSISTENCE_KEY,
176                         &network_graph.encode())
177         }
178
179         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
180                 self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
181                         SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
182                         SCORER_PERSISTENCE_KEY,
183                         &scorer.encode())
184         }
185 }
186
187 impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for dyn KVStore + Send + Sync
188         where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
189                 T::Target: 'static + BroadcasterInterface,
190                 ES::Target: 'static + EntropySource,
191                 NS::Target: 'static + NodeSigner,
192                 SP::Target: 'static + SignerProvider,
193                 F::Target: 'static + FeeEstimator,
194                 R::Target: 'static + Router,
195                 L::Target: 'static + Logger,
196 {
197         fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
198                 self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
199                         CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
200                         CHANNEL_MANAGER_PERSISTENCE_KEY,
201                         &channel_manager.encode())
202         }
203
204         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
205                 self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
206                         NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
207                         NETWORK_GRAPH_PERSISTENCE_KEY,
208                         &network_graph.encode())
209         }
210
211         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
212                 self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
213                         SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
214                         SCORER_PERSISTENCE_KEY,
215                         &scorer.encode())
216         }
217 }
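
// --- Editorial example, not part of upstream LDK -------------------------------------
// A hedged sketch of how the blanket `Persister` implementations above are typically
// driven (in LDK this is the background processor's job): after handling events, the
// manager is re-persisted, and the graph and scorer are persisted periodically. The
// helper name and the idea of calling all three back-to-back are illustrative only.
#[allow(dead_code)]
fn persist_all<'a, P, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>(
        persister: &P, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>,
        network_graph: &NetworkGraph<L>, scorer: &S,
) -> Result<(), io::Error>
where
        P: Persister<'a, M, T, ES, NS, SP, F, R, L, S> + ?Sized,
        M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
        T::Target: 'static + BroadcasterInterface,
        ES::Target: 'static + EntropySource,
        NS::Target: 'static + NodeSigner,
        SP::Target: 'static + SignerProvider,
        F::Target: 'static + FeeEstimator,
        R::Target: 'static + Router,
        L::Target: 'static + Logger,
{
        // Failing to persist the manager is the most serious of the three, so surface it first.
        persister.persist_manager(channel_manager)?;
        persister.persist_graph(network_graph)?;
        persister.persist_scorer(scorer)
}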
218
219 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
220         // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
221         // down once these start returning failure.
222         // Then we should return InProgress rather than UnrecoverableError, implying we should probably
223         // just shut down the node since we're not retrying persistence!
224
225         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
226                 let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
227                 match self.write(
228                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
229                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
230                         &key, &monitor.encode())
231                 {
232                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
233                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
234                 }
235         }
236
237         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
238                 let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
239                 match self.write(
240                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
241                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
242                         &key, &monitor.encode())
243                 {
244                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
245                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
246                 }
247         }
248 }
249
250 impl<ChannelSigner: WriteableEcdsaChannelSigner> Persist<ChannelSigner> for dyn KVStore + Send + Sync {
251         // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
252         // down once these start returning failure.
253         // Then we should return InProgress rather than UnrecoverableError, implying we should probably
254         // just shut down the node since we're not retrying persistence!
255
256         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
257                 let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
258                 match self.write(
259                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
260                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
261                         &key, &monitor.encode())
262                 {
263                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
264                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
265                 }
266         }
267
268         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
269                 let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
270                 match self.write(
271                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
272                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
273                         &key, &monitor.encode())
274                 {
275                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
276                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
277                 }
278         }
279 }
280
281 /// Read previously persisted [`ChannelMonitor`]s from the store.
282 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
283         kv_store: K, entropy_source: ES, signer_provider: SP,
284 ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
285 where
286         K::Target: KVStore,
287         ES::Target: EntropySource + Sized,
288         SP::Target: SignerProvider + Sized,
289 {
290         let mut res = Vec::new();
291
292         for stored_key in kv_store.list(
293                 CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)?
294         {
295                 if stored_key.len() < 66 {
296                         return Err(io::Error::new(
297                                 io::ErrorKind::InvalidData,
298                                 "Stored key has invalid length"));
299                 }
300
301                 let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
302                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
303                 })?;
304
305                 let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
306                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
307                 })?;
308
309                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
310                         &mut io::Cursor::new(
311                                 kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
312                         (&*entropy_source, &*signer_provider),
313                 ) {
314                         Ok((block_hash, channel_monitor)) => {
315                                 if channel_monitor.get_funding_txo().0.txid != txid
316                                         || channel_monitor.get_funding_txo().0.index != index
317                                 {
318                                         return Err(io::Error::new(
319                                                 io::ErrorKind::InvalidData,
320                                                 "ChannelMonitor was stored under the wrong key",
321                                         ));
322                                 }
323                                 res.push((block_hash, channel_monitor));
324                         }
325                         Err(_) => {
326                                 return Err(io::Error::new(
327                                         io::ErrorKind::InvalidData,
328                                         "Failed to read ChannelMonitor"
329                                 ))
330                         }
331                 }
332         }
333         Ok(res)
334 }
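
// --- Editorial example, not part of upstream LDK -------------------------------------
// A hedged sketch of start-up usage for `read_channel_monitors`: any `KVStore` plus any
// type implementing both `EntropySource` and `SignerProvider` (for example a reference
// to LDK's `KeysManager`) can be passed. The helper name is illustrative only.
#[allow(dead_code)]
fn load_monitors_on_startup<K: Deref, KM: Deref + Clone>(
        kv_store: K, keys_manager: KM,
) -> Result<Vec<(BlockHash, ChannelMonitor<<KM::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
where
        K::Target: KVStore,
        KM::Target: EntropySource + SignerProvider + Sized,
{
        // The monitors returned here are expected to be handed to the chain watcher (e.g.
        // `ChainMonitor::watch_channel`) and to `ChannelManagerReadArgs` before the node
        // resumes processing blocks.
        read_channel_monitors(kv_store, keys_manager.clone(), keys_manager)
}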
335
336 /// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
337 /// [`ChannelMonitorUpdate`]s.
338 ///
339 /// # Overview
340 ///
341 /// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
342 /// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
343 /// deleting) and complexity. This is because it writes channel monitor differential updates,
344 /// whereas the other (default) implementation rewrites the entire monitor on each update. For
345 /// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
346 /// of megabytes (or more). Updates can be as small as a few hundred bytes.
347 ///
348 /// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
349 /// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
350 /// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
351 /// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
352 /// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
353 /// sentinel bytes.
354 ///
355 /// # Storing monitors
356 ///
357 /// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
358 ///
359 ///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
360 ///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
361 ///
362 /// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`],
363 /// using the familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
364 ///
365 /// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
366 ///
367 ///   - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`]
368 ///   - secondary namespace: [the monitor's encoded outpoint name]
369 ///
370 /// Under that secondary namespace, each update is stored with a number string, like `21`, which
371 /// represents its `update_id` value.
372 ///
373 /// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
374 ///
375 ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
376 ///   - Index: `1`
377 ///
378 /// Full channel monitors would be stored at a single key:
379 ///
380 /// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
381 ///
382 /// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
383 ///
384 /// ```text
385 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
386 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
387 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
388 /// ```
389 /// ... and so on.
390 ///
391 /// # Reading channel state from storage
392 ///
393 /// Channel state can be reconstructed by calling
394 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
395 /// list channel monitors themselves and load channels individually using
396 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
397 ///
398 /// ## EXTREMELY IMPORTANT
399 ///
400 /// It is extremely important that your [`KVStore::read`] implementation uses the
401 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
402 /// that circumstance (not when there is really a permissions error, for example). This is because
403 /// neither channel monitor reading function lists updates. Instead, each of them reads the monitor and,
404 /// using its stored `update_id`, synthesizes update storage keys, trying them in sequence until
405 /// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
406 ///
407 /// # Pruning stale channel updates
408 ///
409 /// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`.
410 /// Monitor updates in the range between the latest `update_id` and `update_id - maximum_pending_updates`
411 /// are deleted.
412 /// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
413 /// will complete. However, stale updates are not a problem for data integrity, since only updates
414 /// with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
415 ///
416 /// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
417 /// would like to get rid of them, consider using the
418 /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
419 pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
420 where
421         K::Target: KVStore,
422         L::Target: Logger,
423         ES::Target: EntropySource + Sized,
424         SP::Target: SignerProvider + Sized,
425 {
426         kv_store: K,
427         logger: L,
428         maximum_pending_updates: u64,
429         entropy_source: ES,
430         signer_provider: SP,
431 }
432
433 #[allow(dead_code)]
434 impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
435         MonitorUpdatingPersister<K, L, ES, SP>
436 where
437         K::Target: KVStore,
438         L::Target: Logger,
439         ES::Target: EntropySource + Sized,
440         SP::Target: SignerProvider + Sized,
441 {
442         /// Constructs a new [`MonitorUpdatingPersister`].
443         ///
444         /// The `maximum_pending_updates` parameter controls how many updates may be stored before a
445         /// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
446         /// consolidation will frequently occur with fewer updates than what you set here; this number
447         /// is merely the maximum that may be stored. When setting this value, consider that for higher
448         /// values of `maximum_pending_updates`:
449         ///
450         ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
451         /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
452         /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
453         ///   - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
454         /// "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
455         /// less frequent "waves."
456         ///   - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
457         /// [`MonitorUpdatingPersister::cleanup_stale_updates`].
458         pub fn new(
459                 kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
460                 signer_provider: SP,
461         ) -> Self {
462                 MonitorUpdatingPersister {
463                         kv_store,
464                         logger,
465                         maximum_pending_updates,
466                         entropy_source,
467                         signer_provider,
468                 }
469         }
470
471         /// Reads all stored channel monitors, along with any stored updates for them.
472         ///
473         /// It is extremely important that your [`KVStore::read`] implementation uses the
474         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
475         /// documentation for [`MonitorUpdatingPersister`].
476         pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
477                 &self, broadcaster: &B, fee_estimator: &F,
478         ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
479         where
480                 B::Target: BroadcasterInterface,
481                 F::Target: FeeEstimator,
482         {
483                 let monitor_list = self.kv_store.list(
484                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
485                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
486                 )?;
487                 let mut res = Vec::with_capacity(monitor_list.len());
488                 for monitor_key in monitor_list {
489                         res.push(self.read_channel_monitor_with_updates(
490                                 broadcaster,
491                                 fee_estimator,
492                                 monitor_key,
493                         )?)
494                 }
495                 Ok(res)
496         }
497
498         /// Read a single channel monitor, along with any stored updates for it.
499         ///
500         /// It is extremely important that your [`KVStore::read`] implementation uses the
501         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
502         /// documentation for [`MonitorUpdatingPersister`].
503         ///
504         /// For `monitor_key`, channel storage keys are the channel's transaction ID and index, or
505         /// [`OutPoint`], with an underscore `_` between them. For example, given:
506         ///
507         ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
508         ///   - Index: `1`
509         ///
510         /// The correct `monitor_key` would be:
511         /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
512         ///
513         /// Loading a large number of monitors will be faster if done in parallel. You can use this
514         /// function to accomplish this. Take care to limit the number of parallel readers.
515         pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
516                 &self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
517         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
518         where
519                 B::Target: BroadcasterInterface,
520                 F::Target: FeeEstimator,
521         {
522                 let monitor_name = MonitorName::new(monitor_key)?;
523                 let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
524                 let mut current_update_id = monitor.get_latest_update_id();
525                 loop {
526                         current_update_id = match current_update_id.checked_add(1) {
527                                 Some(next_update_id) => next_update_id,
528                                 None => break,
529                         };
530                         let update_name = UpdateName::from(current_update_id);
531                         let update = match self.read_monitor_update(&monitor_name, &update_name) {
532                                 Ok(update) => update,
533                                 Err(err) if err.kind() == io::ErrorKind::NotFound => {
534                                         // We can't find any more updates, so we are done.
535                                         break;
536                                 }
537                                 Err(err) => return Err(err),
538                         };
539
540                         monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
541                                 .map_err(|e| {
542                                         log_error!(
543                                                 self.logger,
544                                                 "Monitor update failed. monitor: {} update: {} reason: {:?}",
545                                                 monitor_name.as_str(),
546                                                 update_name.as_str(),
547                                                 e
548                                         );
549                                         io::Error::new(io::ErrorKind::Other, "Monitor update failed")
550                                 })?;
551                 }
552                 Ok((block_hash, monitor))
553         }
554
555         /// Read a channel monitor.
556         fn read_monitor(
557                 &self, monitor_name: &MonitorName,
558         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
559                 let outpoint: OutPoint = monitor_name.try_into()?;
560                 let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
561                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
562                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
563                         monitor_name.as_str(),
564                 )?);
565                 // Discard the sentinel bytes if found.
566                 if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
567                         monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
568                 }
569                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
570                         &mut monitor_cursor,
571                         (&*self.entropy_source, &*self.signer_provider),
572                 ) {
573                         Ok((blockhash, channel_monitor)) => {
574                                 if channel_monitor.get_funding_txo().0.txid != outpoint.txid
575                                         || channel_monitor.get_funding_txo().0.index != outpoint.index
576                                 {
577                                         log_error!(
578                                                 self.logger,
579                                                 "ChannelMonitor {} was stored under the wrong key!",
580                                                 monitor_name.as_str()
581                                         );
582                                         Err(io::Error::new(
583                                                 io::ErrorKind::InvalidData,
584                                                 "ChannelMonitor was stored under the wrong key",
585                                         ))
586                                 } else {
587                                         Ok((blockhash, channel_monitor))
588                                 }
589                         }
590                         Err(e) => {
591                                 log_error!(
592                                         self.logger,
593                                         "Failed to read ChannelMonitor {}, reason: {}",
594                                         monitor_name.as_str(),
595                                         e,
596                                 );
597                                 Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
598                         }
599                 }
600         }
601
602         /// Read a channel monitor update.
603         fn read_monitor_update(
604                 &self, monitor_name: &MonitorName, update_name: &UpdateName,
605         ) -> Result<ChannelMonitorUpdate, io::Error> {
606                 let update_bytes = self.kv_store.read(
607                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
608                         monitor_name.as_str(),
609                         update_name.as_str(),
610                 )?;
611                 ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
612                         log_error!(
613                                 self.logger,
614                                 "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
615                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
616                                 monitor_name.as_str(),
617                                 update_name.as_str(),
618                                 e,
619                         );
620                         io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
621                 })
622         }
623
624         /// Cleans up stale updates for all monitors.
625         ///
626         /// This function works by first listing all monitors, and then for each of them, listing all
627         /// updates. The updates that have an `update_id` less than or equal to that of the stored monitor
628         /// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
629         /// be passed to [`KVStore::remove`].
630         pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
631                 let monitor_keys = self.kv_store.list(
632                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
633                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
634                 )?;
635                 for monitor_key in monitor_keys {
636                         let monitor_name = MonitorName::new(monitor_key)?;
637                         let (_, current_monitor) = self.read_monitor(&monitor_name)?;
638                         let updates = self
639                                 .kv_store
640                                 .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?;
641                         for update in updates {
642                                 let update_name = UpdateName::new(update)?;
643                                 // If the update_id is not newer than the stored monitor's, delete the update
644                                 if update_name.0 <= current_monitor.get_latest_update_id() {
645                                         self.kv_store.remove(
646                                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
647                                                 monitor_name.as_str(),
648                                                 update_name.as_str(),
649                                                 lazy,
650                                         )?;
651                                 }
652                         }
653                 }
654                 Ok(())
655         }
656 }
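
// --- Editorial example, not part of upstream LDK -------------------------------------
// A hedged sketch of calling `cleanup_stale_updates` during start-up, for instance after
// a crash left lazily-deleted update entries behind. The helper name is illustrative.
#[allow(dead_code)]
fn prune_stale_updates_on_startup<K: Deref, L: Deref, ES: Deref, SP: Deref>(
        persister: &MonitorUpdatingPersister<K, L, ES, SP>,
) -> Result<(), io::Error>
where
        K::Target: KVStore,
        L::Target: Logger,
        ES::Target: EntropySource + Sized,
        SP::Target: SignerProvider + Sized,
{
        // Lazy removal is fine here: stale updates never affect correctness, because only
        // updates newer than the stored monitor are ever replayed on read.
        persister.cleanup_stale_updates(true)
}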
657
658 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
659         Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
660 where
661         K::Target: KVStore,
662         L::Target: Logger,
663         ES::Target: EntropySource + Sized,
664         SP::Target: SignerProvider + Sized,
665 {
666         /// Persists a new channel. This means writing the entire monitor to the
667         /// parametrized [`KVStore`].
668         fn persist_new_channel(
669                 &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
670                 _monitor_update_call_id: MonitorUpdateId,
671         ) -> chain::ChannelMonitorUpdateStatus {
672                 // Determine the proper key for this monitor
673                 let monitor_name = MonitorName::from(funding_txo);
674                 // Serialize and write the new monitor
675                 let mut monitor_bytes = Vec::with_capacity(
676                         MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
677                 );
678                 monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
679                 monitor.write(&mut monitor_bytes).unwrap();
680                 match self.kv_store.write(
681                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
682                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
683                         monitor_name.as_str(),
684                         &monitor_bytes,
685                 ) {
686                         Ok(_) => {
687                                 chain::ChannelMonitorUpdateStatus::Completed
688                         }
689                         Err(e) => {
690                                 log_error!(
691                                         self.logger,
692                                         "Failed to write ChannelMonitor {}/{}/{} reason: {}",
693                                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
694                                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
695                                         monitor_name.as_str(),
696                                         e
697                                 );
698                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
699                         }
700                 }
701         }
702
703         /// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
704         ///
705         /// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
706         ///
707         ///   - No full monitor is found in [`KVStore`]
708         ///   - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
709         ///   - LDK commands re-persisting the entire monitor through this function, specifically when
710         ///     `update` is `None`.
711         ///   - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
712         fn update_persisted_channel(
713                 &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
714                 monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
715         ) -> chain::ChannelMonitorUpdateStatus {
716                 // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
717                 // ChannelMonitorUpdate's update_id.
718                 if let Some(update) = update {
719                         if update.update_id != CLOSED_CHANNEL_UPDATE_ID
720                                 && update.update_id % self.maximum_pending_updates != 0
721                         {
722                                 let monitor_name = MonitorName::from(funding_txo);
723                                 let update_name = UpdateName::from(update.update_id);
724                                 match self.kv_store.write(
725                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
726                                         monitor_name.as_str(),
727                                         update_name.as_str(),
728                                         &update.encode(),
729                                 ) {
730                                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
731                                         Err(e) => {
732                                                 log_error!(
733                                                         self.logger,
734                                                         "Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
735                                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
736                                                         monitor_name.as_str(),
737                                                         update_name.as_str(),
738                                                         e
739                                                 );
740                                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
741                                         }
742                                 }
743                         } else {
744                                 let monitor_name = MonitorName::from(funding_txo);
745                                 // In case of a channel-close monitor update, we need to read the old monitor before persisting
746                                 // the new one in order to determine the cleanup range.
747                                 let maybe_old_monitor = match monitor.get_latest_update_id() {
748                                         CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
749                                         _ => None
750                                 };
751
752                                 // We could write this update, but it meets the criteria of our design that call for a full monitor write.
753                                 let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
754
755                                 if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
756                                         let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
757                                                 // If there is an error while reading old monitor, we skip clean up.
758                                                 maybe_old_monitor.map(|(_, ref old_monitor)| {
759                                                         let start = old_monitor.get_latest_update_id();
760                                                         // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
761                                                         let end = cmp::min(
762                                                                 start.saturating_add(self.maximum_pending_updates),
763                                                                 CLOSED_CHANNEL_UPDATE_ID - 1,
764                                                         );
765                                                         (start, end)
766                                                 })
767                                         } else {
768                                                 let end = monitor.get_latest_update_id();
769                                                 let start = end.saturating_sub(self.maximum_pending_updates);
770                                                 Some((start, end))
771                                         };
772
773                                         if let Some((start, end)) = cleanup_range {
774                                                 self.cleanup_in_range(monitor_name, start, end);
775                                         }
776                                 }
777
778                                 monitor_update_status
779                         }
780                 } else {
781                         // There is no update given, so we must persist a new monitor.
782                         self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
783                 }
784         }
785 }
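
// --- Editorial example, not part of upstream LDK -------------------------------------
// Illustration of the consolidation rule implemented in `update_persisted_channel` above:
// with `maximum_pending_updates` = N, a full monitor write happens whenever
// `update_id % N == 0`, whenever the update carries `CLOSED_CHANNEL_UPDATE_ID`, or when
// no update object is supplied at all; every other update is stored as a small
// differential `ChannelMonitorUpdate` record.
#[allow(dead_code)]
fn writes_full_monitor(update_id: u64, maximum_pending_updates: u64) -> bool {
        update_id == CLOSED_CHANNEL_UPDATE_ID || update_id % maximum_pending_updates == 0
}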
786
787 impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
788 where
789         ES::Target: EntropySource + Sized,
790         K::Target: KVStore,
791         L::Target: Logger,
792         SP::Target: SignerProvider + Sized
793 {
794         // Cleans up monitor updates for given monitor in range `start..=end`.
795         fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
796                 for update_id in start..=end {
797                         let update_name = UpdateName::from(update_id);
798                         if let Err(e) = self.kv_store.remove(
799                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
800                                 monitor_name.as_str(),
801                                 update_name.as_str(),
802                                 true,
803                         ) {
804                                 log_error!(
805                                         self.logger,
806                                         "Failed to clean up channel monitor updates for monitor {}, reason: {}",
807                                         monitor_name.as_str(),
808                                         e
809                                 );
810                         };
811                 }
812         }
813 }
814
815 /// A struct representing a name for a monitor.
816 #[derive(Debug)]
817 struct MonitorName(String);
818
819 impl MonitorName {
820         /// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
821         /// be formed from the given `name`.
822         pub fn new(name: String) -> Result<Self, io::Error> {
823                 MonitorName::do_try_into_outpoint(&name)?;
824                 Ok(Self(name))
825         }
826         /// Convert this monitor name to a str.
827         pub fn as_str(&self) -> &str {
828                 &self.0
829         }
830         /// Attempt to form a valid [`OutPoint`] from a given name string.
831         fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
832                 let mut parts = name.splitn(2, '_');
833                 let txid = if let Some(part) = parts.next() {
834                         Txid::from_str(part).map_err(|_| {
835                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
836                         })?
837                 } else {
838                         return Err(io::Error::new(
839                                 io::ErrorKind::InvalidData,
840                                 "Stored monitor key is not a splittable string",
841                         ));
842                 };
843                 let index = if let Some(part) = parts.next() {
844                         part.parse().map_err(|_| {
845                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
846                         })?
847                 } else {
848                         return Err(io::Error::new(
849                                 io::ErrorKind::InvalidData,
850                                 "No tx index value found after underscore in stored key",
851                         ));
852                 };
853                 Ok(OutPoint { txid, index })
854         }
855 }
856
857 impl TryFrom<&MonitorName> for OutPoint {
858         type Error = io::Error;
859
860         fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
861                 MonitorName::do_try_into_outpoint(&value.0)
862         }
863 }
864
865 impl From<OutPoint> for MonitorName {
866         fn from(value: OutPoint) -> Self {
867                 MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
868         }
869 }
870
871 /// A struct representing a name for an update.
872 #[derive(Debug)]
873 struct UpdateName(u64, String);
874
875 impl UpdateName {
876         /// Constructs an [`UpdateName`], after verifying that an update sequence ID
877         /// can be derived from the given `name`.
878         pub fn new(name: String) -> Result<Self, io::Error> {
879                 match name.parse::<u64>() {
880                         Ok(u) => Ok(u.into()),
881                         Err(_) => {
882                                 Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
883                         }
884                 }
885         }
886
887         /// Convert this monitor update name to a &str
888         pub fn as_str(&self) -> &str {
889                 &self.1
890         }
891 }
892
893 impl From<u64> for UpdateName {
894         fn from(value: u64) -> Self {
895                 Self(value, value.to_string())
896         }
897 }
898
899 #[cfg(test)]
900 mod tests {
901         use super::*;
902         use crate::chain::chainmonitor::Persist;
903         use crate::chain::ChannelMonitorUpdateStatus;
904         use crate::events::{ClosureReason, MessageSendEventsProvider};
905         use crate::ln::functional_test_utils::*;
906         use crate::util::test_utils::{self, TestLogger, TestStore};
907         use crate::{check_added_monitors, check_closed_broadcast};
908
909         const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
910
911         #[test]
912         fn converts_u64_to_update_name() {
913                 assert_eq!(UpdateName::from(0).as_str(), "0");
914                 assert_eq!(UpdateName::from(21).as_str(), "21");
915                 assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
916         }
917
918         #[test]
919         fn bad_update_name_fails() {
920                 assert!(UpdateName::new("deadbeef".to_string()).is_err());
921                 assert!(UpdateName::new("-1".to_string()).is_err());
922         }
923
924         #[test]
925         fn monitor_from_outpoint_works() {
926                 let monitor_name1 = MonitorName::from(OutPoint {
927                         txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
928                         index: 1,
929                 });
930                 assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
931
932                 let monitor_name2 = MonitorName::from(OutPoint {
933                         txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
934                         index: u16::MAX,
935                 });
936                 assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
937         }
938
939         #[test]
940         fn bad_monitor_string_fails() {
941                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
942                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
943                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
944         }
945
946         // Exercise the `MonitorUpdatingPersister` with real channels and payments.
947         #[test]
948         fn persister_with_real_monitors() {
949                 // This value is used later to limit how many iterations we perform.
950                 let persister_0_max_pending_updates = 7;
951                 // Intentionally set this to a smaller value to test a different alignment.
952                 let persister_1_max_pending_updates = 3;
953                 let chanmon_cfgs = create_chanmon_cfgs(4);
954                 let persister_0 = MonitorUpdatingPersister {
955                         kv_store: &TestStore::new(false),
956                         logger: &TestLogger::new(),
957                         maximum_pending_updates: persister_0_max_pending_updates,
958                         entropy_source: &chanmon_cfgs[0].keys_manager,
959                         signer_provider: &chanmon_cfgs[0].keys_manager,
960                 };
961                 let persister_1 = MonitorUpdatingPersister {
962                         kv_store: &TestStore::new(false),
963                         logger: &TestLogger::new(),
964                         maximum_pending_updates: persister_1_max_pending_updates,
965                         entropy_source: &chanmon_cfgs[1].keys_manager,
966                         signer_provider: &chanmon_cfgs[1].keys_manager,
967                 };
968                 let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
969                 let chain_mon_0 = test_utils::TestChainMonitor::new(
970                         Some(&chanmon_cfgs[0].chain_source),
971                         &chanmon_cfgs[0].tx_broadcaster,
972                         &chanmon_cfgs[0].logger,
973                         &chanmon_cfgs[0].fee_estimator,
974                         &persister_0,
975                         &chanmon_cfgs[0].keys_manager,
976                 );
977                 let chain_mon_1 = test_utils::TestChainMonitor::new(
978                         Some(&chanmon_cfgs[1].chain_source),
979                         &chanmon_cfgs[1].tx_broadcaster,
980                         &chanmon_cfgs[1].logger,
981                         &chanmon_cfgs[1].fee_estimator,
982                         &persister_1,
983                         &chanmon_cfgs[1].keys_manager,
984                 );
985                 node_cfgs[0].chain_monitor = chain_mon_0;
986                 node_cfgs[1].chain_monitor = chain_mon_1;
987                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
988                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
989                 let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
990                 let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;
991
992                 // Check that the persisted channel data is empty before any channels are
993                 // open.
994                 let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
995                         &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
996                 assert_eq!(persisted_chan_data_0.len(), 0);
997                 let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
998                         &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
999                 assert_eq!(persisted_chan_data_1.len(), 0);
1000
1001                 // Helper to make sure the channel is on the expected update ID.
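                // (A macro rather than a helper fn, presumably so it can use the persisters,
                // broadcasters and cfgs from the enclosing scope directly.)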
1002                 macro_rules! check_persisted_data {
1003                         ($expected_update_id: expr) => {
1004                                 persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
1005                                         &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
1006                                 // check that we stored only one monitor
1007                                 assert_eq!(persisted_chan_data_0.len(), 1);
1008                                 for (_, mon) in persisted_chan_data_0.iter() {
1009                                         // check that when we read it, we got the right update id
1010                                         assert_eq!(mon.get_latest_update_id(), $expected_update_id);
1011
1012                                         // If the channel monitor is at a consolidation threshold, ensure no updates are stored.
1013                                         let monitor_name = MonitorName::from(mon.get_funding_txo().0);
1014                                         if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
1015                                                         || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
1016                                                 assert_eq!(
1017                                                         persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
1018                                                                 monitor_name.as_str()).unwrap().len(),
1019                                                         0,
1020                                                         "updates stored when they shouldn't be in persister 0"
1021                                                 );
1022                                         }
1023                                 }
1024                                 persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
1025                                         &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
1026                                 assert_eq!(persisted_chan_data_1.len(), 1);
1027                                 for (_, mon) in persisted_chan_data_1.iter() {
1028                                         assert_eq!(mon.get_latest_update_id(), $expected_update_id);
1029                                         let monitor_name = MonitorName::from(mon.get_funding_txo().0);
1030                                         // If the channel monitor is at a consolidation threshold, ensure no updates are stored.
1031                                         if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
1032                                                         || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
1033                                                 assert_eq!(
1034                                                         persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
1035                                                                 monitor_name.as_str()).unwrap().len(),
1036                                                         0,
1037                                                         "updates stored when they shouldn't be in persister 1"
1038                                                 );
1039                                         }
1040                                 }
1041                         };
1042                 }
1043
1044                 // Create an initial channel and check that its monitor was persisted.
1045                 let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
1046                 check_persisted_data!(0);
1047
1048                 // Send a few payments and make sure the monitors are updated to the latest.
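                // Each payment advances both channel monitors by `EXPECTED_UPDATES_PER_PAYMENT`
                // update ids, so the expected id is a simple multiple of the payments sent so far.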
1049                 send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
1050                 check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
1051                 send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
1052                 check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);
1053
1054                 // Send a few more payments to try all the alignments of max pending updates with
1055                 // updates for a payment sent and received.
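                // The counter starts at 3 because two payments (2 * EXPECTED_UPDATES_PER_PAYMENT
                // updates) have already been sent above.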
1056                 let mut sender = 0;
1057                 for i in 3..=persister_0_max_pending_updates * 2 {
1058                         let receiver;
1059                         if sender == 0 {
1060                                 sender = 1;
1061                                 receiver = 0;
1062                         } else {
1063                                 sender = 0;
1064                                 receiver = 1;
1065                         }
1066                         send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
1067                         check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
1068                 }
1069
1070                 // Force close because cooperative close doesn't result in any persisted
1071                 // updates.
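                // Closing bumps the monitor to `CLOSED_CHANNEL_UPDATE_ID`, which
                // `check_persisted_data!` also treats as a consolidation point where no pending
                // updates may remain.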
1072                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1073
1074                 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
1075                 check_closed_broadcast!(nodes[0], true);
1076                 check_added_monitors!(nodes[0], 1);
1077
1078                 let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
1079                 assert_eq!(node_txn.len(), 1);
1080
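                // Mine node 0's commitment transaction on node 1 so it sees the channel as closed
                // on chain.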
1081                 connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
1082
1083                 check_closed_broadcast!(nodes[1], true);
1084                 check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
1085                 check_added_monitors!(nodes[1], 1);
1086
1087                 // Make sure everything is persisted as expected after close.
1088                 check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
1089
1090                 // Make sure no stale updates are left behind after the close consolidated both monitors.
1091                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
1092                 let (_, monitor) = &persisted_chan_data[0];
1093                 let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
1094                 // The channel should have 0 updates, as it wrote a full monitor and consolidated.
1095                 assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
1096                 assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
1097         }
1098
1099         // Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to persist a
1100         // monitor or update with it results in the persister returning an UnrecoverableError status.
1101         #[test]
1102         fn unrecoverable_error_on_write_failure() {
1103                 // Set up a dummy channel and force close. This will produce a monitor
1104                 // that we can then use to test persistence.
1105                 let chanmon_cfgs = create_chanmon_cfgs(2);
1106                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1107                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1108                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1109                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
1110                 nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
1111                 check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
1112                 {
1113                         let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
1114                         let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
1115                         let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
1116                         let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
1117                         let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
1118                         let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
1119
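                        // This persister is backed by a read-only `TestStore` (constructed with
                        // `true`), so every write attempt fails; that failure is what should
                        // surface below as `UnrecoverableError`.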
1120                         let ro_persister = MonitorUpdatingPersister {
1121                                 kv_store: &TestStore::new(true),
1122                                 logger: &TestLogger::new(),
1123                                 maximum_pending_updates: 11,
1124                                 entropy_source: node_cfgs[0].keys_manager,
1125                                 signer_provider: node_cfgs[0].keys_manager,
1126                         };
1127                         match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
1128                                 ChannelMonitorUpdateStatus::UnrecoverableError => {
1129                                         // correct result
1130                                 }
1131                                 ChannelMonitorUpdateStatus::Completed => {
1132                                         panic!("Completed persisting new channel when shouldn't have")
1133                                 }
1134                                 ChannelMonitorUpdateStatus::InProgress => {
1135                                         panic!("Returned InProgress when shouldn't have")
1136                                 }
1137                         }
1138                         match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
1139                                 ChannelMonitorUpdateStatus::UnrecoverableError => {
1140                                         // correct result
1141                                 }
1142                                 ChannelMonitorUpdateStatus::Completed => {
1143                                         panic!("Completed persisting update when shouldn't have")
1144                                 }
1145                                 ChannelMonitorUpdateStatus::InProgress => {
1146                                         panic!("Returned InProgress when shouldn't have")
1147                                 }
1148                         }
1149                         added_monitors.clear();
1150                 }
1151                 nodes[1].node.get_and_clear_pending_msg_events();
1152         }
1153
1154         // Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
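        // A stale update here is one that is already reflected in the latest persisted full
        // monitor and is therefore safe to delete.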
1155         #[test]
1156         fn clean_stale_updates_works() {
1157                 let test_max_pending_updates = 7;
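                // Three cfgs for two nodes: cfgs[2] only lends its tx broadcaster (see
                // `broadcaster_0` below).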
1158                 let chanmon_cfgs = create_chanmon_cfgs(3);
1159                 let persister_0 = MonitorUpdatingPersister {
1160                         kv_store: &TestStore::new(false),
1161                         logger: &TestLogger::new(),
1162                         maximum_pending_updates: test_max_pending_updates,
1163                         entropy_source: &chanmon_cfgs[0].keys_manager,
1164                         signer_provider: &chanmon_cfgs[0].keys_manager,
1165                 };
1166                 let persister_1 = MonitorUpdatingPersister {
1167                         kv_store: &TestStore::new(false),
1168                         logger: &TestLogger::new(),
1169                         maximum_pending_updates: test_max_pending_updates,
1170                         entropy_source: &chanmon_cfgs[1].keys_manager,
1171                         signer_provider: &chanmon_cfgs[1].keys_manager,
1172                 };
1173                 let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1174                 let chain_mon_0 = test_utils::TestChainMonitor::new(
1175                         Some(&chanmon_cfgs[0].chain_source),
1176                         &chanmon_cfgs[0].tx_broadcaster,
1177                         &chanmon_cfgs[0].logger,
1178                         &chanmon_cfgs[0].fee_estimator,
1179                         &persister_0,
1180                         &chanmon_cfgs[0].keys_manager,
1181                 );
1182                 let chain_mon_1 = test_utils::TestChainMonitor::new(
1183                         Some(&chanmon_cfgs[1].chain_source),
1184                         &chanmon_cfgs[1].tx_broadcaster,
1185                         &chanmon_cfgs[1].logger,
1186                         &chanmon_cfgs[1].fee_estimator,
1187                         &persister_1,
1188                         &chanmon_cfgs[1].keys_manager,
1189                 );
1190                 node_cfgs[0].chain_monitor = chain_mon_0;
1191                 node_cfgs[1].chain_monitor = chain_mon_1;
1192                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1193                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1194
1195                 let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
1196
1197                 // Check that the persisted channel data is empty before any channels are
1198                 // open.
1199                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
1200                 assert_eq!(persisted_chan_data.len(), 0);
1201
1202                 // Create an initial channel.
1203                 let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
1204
1205                 // Send a few payments to advance the updates a bit
1206                 send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
1207                 send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
1208
1209                 // Get the monitor and make a fake stale update at update_id=1 (the lowest update_id an update can have).
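                // A one-byte dummy blob is enough for the fake update: the cleanup only needs the
                // key name to decide staleness, so the contents need not parse as a real
                // `ChannelMonitorUpdate`.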
1210                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
1211                 let (_, monitor) = &persisted_chan_data[0];
1212                 let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
1213                 persister_0
1214                         .kv_store
1215                         .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
1216                         .unwrap();
1217
1218                 // Do the stale update cleanup
1219                 persister_0.cleanup_stale_updates(false).unwrap();
1220
1221                 // Confirm the stale update is unreadable/gone
1222                 assert!(persister_0
1223                         .kv_store
1224                         .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
1225                         .is_err());
1226
1227                 // Force close.
1228                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1229                 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
1230                 check_closed_broadcast!(nodes[0], true);
1231                 check_added_monitors!(nodes[0], 1);
1232
1233                 // Write an update near u64::MAX
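                // `CLOSED_CHANNEL_UPDATE_ID` is `u64::MAX`, so `u64::MAX - 1` is the largest id a
                // stale update can carry after the force close above.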
1234                 persister_0
1235                         .kv_store
1236                         .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
1237                         .unwrap();
1238
1239                 // Do the stale update cleanup
1240                 persister_0.cleanup_stale_updates(false).unwrap();
1241
1242                 // Confirm the stale update is unreadable/gone
1243                 assert!(persister_0
1244                         .kv_store
1245                         .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
1246                         .is_err());
1247         }
1248 }