Merge pull request #2964 from jbesraa/prune-stale-chanmonitor
[rust-lightning] / lightning / src / util / persist.rs
1 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
2 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
3 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
4 // You may not use this file except in accordance with one or both of these
5 // licenses.
6
7 //! This module contains a simple key-value store trait [`KVStore`] that
8 //! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
9 //! and [`ChannelMonitor`] all in one place.
10 //!
11 //! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
12
13 use core::cmp;
14 use core::ops::Deref;
15 use core::str::FromStr;
16 use bitcoin::{BlockHash, Txid};
17
18 use crate::{io, log_error};
19 use crate::prelude::*;
20
21 use crate::chain;
22 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
23 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
24 use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
25 use crate::chain::transaction::OutPoint;
26 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
27 use crate::ln::channelmanager::AChannelManager;
28 use crate::routing::gossip::NetworkGraph;
29 use crate::routing::scoring::WriteableScore;
30 use crate::util::logger::Logger;
31 use crate::util::ser::{Readable, ReadableArgs, Writeable};
32
33 /// The alphabet of characters allowed for namespaces and keys.
34 pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
35
36 /// The maximum number of characters namespaces and keys may have.
37 pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
38
39 /// The primary namespace under which the [`ChannelManager`] will be persisted.
40 ///
41 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
42 pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
43 /// The secondary namespace under which the [`ChannelManager`] will be persisted.
44 ///
45 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
46 pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
47 /// The key under which the [`ChannelManager`] will be persisted.
48 ///
49 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
50 pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
51
52 /// The primary namespace under which [`ChannelMonitor`]s will be persisted.
53 pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
54 /// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
55 pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
56 /// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
57 pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
58
59 /// The primary namespace under which archived [`ChannelMonitor`]s will be persisted.
60 pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "archived_monitors";
61 /// The secondary namespace under which archived [`ChannelMonitor`]s will be persisted.
62 pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
63
64 /// The primary namespace under which the [`NetworkGraph`] will be persisted.
65 pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
66 /// The secondary namespace under which the [`NetworkGraph`] will be persisted.
67 pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
68 /// The key under which the [`NetworkGraph`] will be persisted.
69 pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
70
71 /// The primary namespace under which the [`WriteableScore`] will be persisted.
72 pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
73 /// The secondary namespace under which the [`WriteableScore`] will be persisted.
74 pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
75 /// The key under which the [`WriteableScore`] will be persisted.
76 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
77
78 /// The primary namespace under which [`OutputSweeper`] state will be persisted.
79 ///
80 /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
81 pub const OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
82 /// The secondary namespace under which [`OutputSweeper`] state will be persisted.
83 ///
84 /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
85 pub const OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
87 /// The key under which [`OutputSweeper`] state will be persisted.
88 ///
89 /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
90 pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper";
91
92 /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
93 ///
94 /// This serves to prevent someone from accidentally loading such monitors (which may need
95 /// updates applied to be current) with another implementation.
96 pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
97
98 /// Provides an interface that allows storage and retrieval of persisted values that are associated
99 /// with given keys.
100 ///
101 /// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s
102 /// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
103 /// ways, as long as per-namespace key uniqueness is asserted.
104 ///
105 /// Keys and namespaces are required to be valid ASCII strings in the range of
106 /// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
107 /// primary namespaces and secondary namespaces (`""`) are assumed to be valid; however, if
108 /// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
109 /// that concerns should always be separated by primary namespace first, before secondary
110 /// namespaces are used. While the number of primary namespaces will be relatively small and is
111 /// determined at compile time, there may be many secondary namespaces per primary namespace. Note
112 /// that per-namespace uniqueness also needs to hold for keys *and* namespaces in any given
113 /// namespace, i.e., conflicts between keys and equally named primary namespaces or secondary
114 /// namespaces must be avoided.
115 ///
116 /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
117 /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
118 /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
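///
/// For illustration only, a minimal sketch of an in-memory implementation might look like the
/// following. The `MemoryStore` type is hypothetical and not part of this crate; a real backend
/// would persist to durable storage and enforce the namespace/key constraints described above.
///
/// ```ignore
/// use std::collections::HashMap;
/// use std::sync::Mutex;
///
/// use lightning::io;
/// use lightning::util::persist::KVStore;
///
/// // Hypothetical in-memory backend, for illustration only.
/// #[derive(Default)]
/// struct MemoryStore {
/// 	// Maps (primary_namespace, secondary_namespace, key) to the stored bytes.
/// 	entries: Mutex<HashMap<(String, String, String), Vec<u8>>>,
/// }
///
/// impl KVStore for MemoryStore {
/// 	fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error> {
/// 		let entries = self.entries.lock().unwrap();
/// 		entries
/// 			.get(&(primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()))
/// 			.cloned()
/// 			// Returning `NotFound` for missing keys is part of the trait contract.
/// 			.ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
/// 	}
/// 	fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error> {
/// 		let mut entries = self.entries.lock().unwrap();
/// 		entries.insert((primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()), buf.to_vec());
/// 		Ok(())
/// 	}
/// 	fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool) -> Result<(), io::Error> {
/// 		let mut entries = self.entries.lock().unwrap();
/// 		entries.remove(&(primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()));
/// 		Ok(())
/// 	}
/// 	fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error> {
/// 		let entries = self.entries.lock().unwrap();
/// 		Ok(entries
/// 			.keys()
/// 			.filter(|(p, s, _)| p.as_str() == primary_namespace && s.as_str() == secondary_namespace)
/// 			.map(|(_, _, k)| k.clone())
/// 			.collect())
/// 	}
/// }
/// ```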
119 pub trait KVStore {
120         /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
121         /// `key`.
122         ///
123         /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
124         /// `primary_namespace` and `secondary_namespace`.
125         ///
126         /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
127         fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
128         /// Persists the given data under the given `key`.
129         ///
130         /// Will create the given `primary_namespace` and `secondary_namespace` if not already present
131         /// in the store.
132         fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
133         /// Removes any data that had previously been persisted under the given `key`.
134         ///
135         /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
136         /// remove the given `key` at some point in time after the method returns, e.g., as part of an
137         /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
138         /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
139         ///
140         /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
141         /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
142         /// potentially get lost on crash after the method returns. Therefore, this flag should only be
143         /// set for `remove` operations that can be safely replayed at a later time.
144         ///
145         /// Returns successfully if no data will be stored for the given `primary_namespace`,
146         /// `secondary_namespace`, and `key`, independently of whether it was present before its
147 	/// invocation or not.
148         fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
149         /// Returns a list of keys that are stored under the given `secondary_namespace` in
150         /// `primary_namespace`.
151         ///
152         /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
153         /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
154         fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
155 }
156
157 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
158 ///
159 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
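///
/// Note that any [`KVStore`] gets this trait for free via the blanket implementation below. As a
/// hedged sketch of a call site (the `store`, `channel_manager`, `network_graph`, and `scorer`
/// bindings are placeholders for your node's existing components):
///
/// ```ignore
/// use lightning::util::persist::Persister;
///
/// // `store`, `channel_manager`, `network_graph`, and `scorer` are defined elsewhere in your
/// // application. Typically these calls run from a background-processing task after relevant
/// // state changes.
/// store.persist_manager(&channel_manager)?;
/// store.persist_graph(&network_graph)?;
/// store.persist_scorer(&scorer)?;
/// ```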
160 pub trait Persister<'a, CM: Deref, L: Deref, S: WriteableScore<'a>>
161 where
162         CM::Target: 'static + AChannelManager,
163         L::Target: 'static + Logger,
164 {
165 	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
166         ///
167         /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
168         fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error>;
169
170         /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
171         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
172
173         /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
174         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
175 }
176
177
178 impl<'a, A: KVStore + ?Sized, CM: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, CM, L, S> for A
179 where
180         CM::Target: 'static + AChannelManager,
181         L::Target: 'static + Logger,
182 {
183         fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> {
184                 self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
185                         CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
186                         CHANNEL_MANAGER_PERSISTENCE_KEY,
187                         &channel_manager.get_cm().encode())
188         }
189
190         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
191                 self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
192                         NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
193                         NETWORK_GRAPH_PERSISTENCE_KEY,
194                         &network_graph.encode())
195         }
196
197         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
198                 self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
199                         SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
200                         SCORER_PERSISTENCE_KEY,
201                         &scorer.encode())
202         }
203 }
204
205 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
206 	// TODO: We really need a way for the persister to inform the user that it's time to crash/shut
207         // down once these start returning failure.
208         // Then we should return InProgress rather than UnrecoverableError, implying we should probably
209         // just shut down the node since we're not retrying persistence!
210
211         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
212                 let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
213                 match self.write(
214                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
215                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
216                         &key, &monitor.encode())
217                 {
218                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
219                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
220                 }
221         }
222
223         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
224                 let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
225                 match self.write(
226                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
227                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
228                         &key, &monitor.encode())
229                 {
230                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
231                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
232                 }
233         }
234
235         fn archive_persisted_channel(&self, funding_txo: OutPoint) {
236                 let monitor_name = MonitorName::from(funding_txo);
237                 let monitor = match self.read(
238                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
239                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
240                         monitor_name.as_str(),
241                 ) {
242                         Ok(monitor) => monitor,
243                         Err(_) => return
244                 };
245                 match self.write(
246                         ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
247                         ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
248                         monitor_name.as_str(),
249                         &monitor,
250                 ) {
251                         Ok(()) => {}
252                         Err(_e) => return
253                 };
254                 let _ = self.remove(
255                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
256                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
257                         monitor_name.as_str(),
258                         true,
259                 );
260         }
261 }
262
263 /// Read previously persisted [`ChannelMonitor`]s from the store.
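///
/// A hedged sketch of reading monitors at startup; `store` and `keys_manager` are placeholders for
/// whatever [`KVStore`], [`EntropySource`], and [`SignerProvider`] your node already uses (a
/// `KeysManager` typically implements both of the latter):
///
/// ```ignore
/// // `store` and `keys_manager` are defined elsewhere in your application.
/// let monitors = lightning::util::persist::read_channel_monitors(
/// 	&store, &keys_manager, &keys_manager,
/// )?;
/// for (block_hash, monitor) in monitors.iter() {
/// 	// Each monitor is keyed by its funding outpoint and was last synced at `block_hash`.
/// 	println!("Loaded monitor for {:?} as of block {}", monitor.get_funding_txo().0, block_hash);
/// }
/// ```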
264 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
265         kv_store: K, entropy_source: ES, signer_provider: SP,
266 ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
267 where
268         K::Target: KVStore,
269         ES::Target: EntropySource + Sized,
270         SP::Target: SignerProvider + Sized,
271 {
272         let mut res = Vec::new();
273
274         for stored_key in kv_store.list(
275                 CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)?
276         {
277                 if stored_key.len() < 66 {
278                         return Err(io::Error::new(
279                                 io::ErrorKind::InvalidData,
280                                 "Stored key has invalid length"));
281                 }
282
283                 let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
284                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
285                 })?;
286
287                 let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
288                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
289                 })?;
290
291                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
292                         &mut io::Cursor::new(
293                                 kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
294                         (&*entropy_source, &*signer_provider),
295                 ) {
296                         Ok((block_hash, channel_monitor)) => {
297                                 if channel_monitor.get_funding_txo().0.txid != txid
298                                         || channel_monitor.get_funding_txo().0.index != index
299                                 {
300                                         return Err(io::Error::new(
301                                                 io::ErrorKind::InvalidData,
302                                                 "ChannelMonitor was stored under the wrong key",
303                                         ));
304                                 }
305                                 res.push((block_hash, channel_monitor));
306                         }
307                         Err(_) => {
308                                 return Err(io::Error::new(
309                                         io::ErrorKind::InvalidData,
310                                         "Failed to read ChannelMonitor"
311                                 ))
312                         }
313                 }
314         }
315         Ok(res)
316 }
317
318 /// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
319 /// [`ChannelMonitorUpdate`]s.
320 ///
321 /// # Overview
322 ///
323 /// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
324 /// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
325 /// deleting) and complexity. This is because it writes channel monitor differential updates,
326 /// whereas the other (default) implementation rewrites the entire monitor on each update. For
327 /// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
328 /// of megabytes (or more). Updates can be as small as a few hundred bytes.
329 ///
330 /// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
331 /// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
332 /// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
333 /// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
334 /// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
335 /// sentinel bytes.
336 ///
337 /// # Storing monitors
338 ///
339 /// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
340 ///
341 ///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
342 ///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
343 ///
344 /// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`],
345 /// using the familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
346 ///
347 /// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
348 ///
349 ///   - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`]
350 ///   - secondary namespace: [the monitor's encoded outpoint name]
351 ///
352 /// Under that secondary namespace, each update is stored with a number string, like `21`, which
353 /// represents its `update_id` value.
354 ///
355 /// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
356 ///
357 ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
358 ///   - Index: `1`
359 ///
360 /// Full channel monitors would be stored at a single key:
361 ///
362 /// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
363 ///
364 /// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
365 ///
366 /// ```text
367 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
368 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
369 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
370 /// ```
371 /// ... and so on.
372 ///
373 /// # Reading channel state from storage
374 ///
375 /// Channel state can be reconstructed by calling
376 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
377 /// list channel monitors themselves and load channels individually using
378 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
379 ///
380 /// ## EXTREMELY IMPORTANT
381 ///
382 /// It is extremely important that your [`KVStore::read`] implementation uses the
383 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
384 /// that circumstance (not when there is really a permissions error, for example). This is because
385 /// neither channel monitor reading function lists updates. Instead, each reads the monitor and,
386 /// using its stored `update_id`, synthesizes update storage keys, trying them in sequence until
387 /// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
388 ///
389 /// # Pruning stale channel updates
390 ///
391 /// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`.
392 /// Monitor updates in the range between the latest `update_id` and `update_id - maximum_pending_updates`
393 /// are deleted.
394 /// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
395 /// will complete. However, stale updates are not a problem for data integrity, since only updates with an
396 /// `update_id` higher than that of the stored [`ChannelMonitor`] are ever read.
397 ///
398 /// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
399 /// would like to get rid of them, consider using the
400 /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
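///
/// # Example
///
/// A hedged usage sketch; `kv_store`, `logger`, `keys_manager`, `broadcaster`, and `fee_estimator`
/// are placeholders for your node's existing components and are not defined here:
///
/// ```ignore
/// use lightning::util::persist::MonitorUpdatingPersister;
///
/// // `kv_store`, `logger`, `keys_manager`, `broadcaster`, and `fee_estimator` are defined
/// // elsewhere in your application.
/// // Allow up to 100 pending `ChannelMonitorUpdate`s per channel before a full monitor is
/// // rewritten and the old updates are cleaned up.
/// let persister = MonitorUpdatingPersister::new(
/// 	&kv_store, &logger, 100, &keys_manager, &keys_manager,
/// );
///
/// // On startup, rebuild channel state by replaying stored updates on top of each monitor.
/// let monitors = persister.read_all_channel_monitors_with_updates(&broadcaster, &fee_estimator)?;
///
/// // Optionally remove updates left behind by lazy deletions (e.g. after a crash).
/// persister.cleanup_stale_updates(true)?;
/// ```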
401 pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
402 where
403         K::Target: KVStore,
404         L::Target: Logger,
405         ES::Target: EntropySource + Sized,
406         SP::Target: SignerProvider + Sized,
407 {
408         kv_store: K,
409         logger: L,
410         maximum_pending_updates: u64,
411         entropy_source: ES,
412         signer_provider: SP,
413 }
414
415 #[allow(dead_code)]
416 impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
417         MonitorUpdatingPersister<K, L, ES, SP>
418 where
419         K::Target: KVStore,
420         L::Target: Logger,
421         ES::Target: EntropySource + Sized,
422         SP::Target: SignerProvider + Sized,
423 {
424         /// Constructs a new [`MonitorUpdatingPersister`].
425         ///
426         /// The `maximum_pending_updates` parameter controls how many updates may be stored before a
427         /// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
428         /// consolidation will frequently occur with fewer updates than what you set here; this number
429         /// is merely the maximum that may be stored. When setting this value, consider that for higher
430         /// values of `maximum_pending_updates`:
431         ///
432         ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
433         /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
434         /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
435         ///   - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
436         /// "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
437         /// less frequent "waves."
438         ///   - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
439         /// [`MonitorUpdatingPersister::cleanup_stale_updates`].
440         pub fn new(
441                 kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
442                 signer_provider: SP,
443         ) -> Self {
444                 MonitorUpdatingPersister {
445                         kv_store,
446                         logger,
447                         maximum_pending_updates,
448                         entropy_source,
449                         signer_provider,
450                 }
451         }
452
453         /// Reads all stored channel monitors, along with any stored updates for them.
454         ///
455         /// It is extremely important that your [`KVStore::read`] implementation uses the
456         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
457         /// documentation for [`MonitorUpdatingPersister`].
458         pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
459                 &self, broadcaster: &B, fee_estimator: &F,
460         ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
461         where
462                 B::Target: BroadcasterInterface,
463                 F::Target: FeeEstimator,
464         {
465                 let monitor_list = self.kv_store.list(
466                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
467                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
468                 )?;
469                 let mut res = Vec::with_capacity(monitor_list.len());
470                 for monitor_key in monitor_list {
471                         res.push(self.read_channel_monitor_with_updates(
472                                 broadcaster,
473                                 fee_estimator,
474                                 monitor_key,
475                         )?)
476                 }
477                 Ok(res)
478         }
479
480         /// Read a single channel monitor, along with any stored updates for it.
481         ///
482         /// It is extremely important that your [`KVStore::read`] implementation uses the
483         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
484         /// documentation for [`MonitorUpdatingPersister`].
485         ///
486 	/// The `monitor_key` is the channel's transaction ID and index, i.e., its
487 	/// [`OutPoint`], with an underscore `_` between them. For example, given:
488         ///
489         ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
490         ///   - Index: `1`
491         ///
492         /// The correct `monitor_key` would be:
493         /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
494         ///
495         /// Loading a large number of monitors will be faster if done in parallel. You can use this
496         /// function to accomplish this. Take care to limit the number of parallel readers.
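	///
	/// As a sketch, assuming you already hold the channel's funding [`OutPoint`], the key can be
	/// derived as follows (`persister`, `broadcaster`, and `fee_estimator` are placeholders):
	///
	/// ```ignore
	/// // `funding_txo` is the channel's funding `OutPoint`; `persister`, `broadcaster`, and
	/// // `fee_estimator` are defined elsewhere in your application.
	/// let monitor_key = format!("{}_{}", funding_txo.txid, funding_txo.index);
	/// let (block_hash, monitor) = persister.read_channel_monitor_with_updates(
	/// 	&broadcaster, &fee_estimator, monitor_key,
	/// )?;
	/// ```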
497         pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
498                 &self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
499         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
500         where
501                 B::Target: BroadcasterInterface,
502                 F::Target: FeeEstimator,
503         {
504                 let monitor_name = MonitorName::new(monitor_key)?;
505                 let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
506                 let mut current_update_id = monitor.get_latest_update_id();
507                 loop {
508                         current_update_id = match current_update_id.checked_add(1) {
509                                 Some(next_update_id) => next_update_id,
510                                 None => break,
511                         };
512                         let update_name = UpdateName::from(current_update_id);
513                         let update = match self.read_monitor_update(&monitor_name, &update_name) {
514                                 Ok(update) => update,
515                                 Err(err) if err.kind() == io::ErrorKind::NotFound => {
516                                         // We can't find any more updates, so we are done.
517                                         break;
518                                 }
519                                 Err(err) => return Err(err),
520                         };
521
522                         monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
523                                 .map_err(|e| {
524                                         log_error!(
525                                                 self.logger,
526                                                 "Monitor update failed. monitor: {} update: {} reason: {:?}",
527                                                 monitor_name.as_str(),
528                                                 update_name.as_str(),
529                                                 e
530                                         );
531                                         io::Error::new(io::ErrorKind::Other, "Monitor update failed")
532                                 })?;
533                 }
534                 Ok((block_hash, monitor))
535         }
536
537         /// Read a channel monitor.
538         fn read_monitor(
539                 &self, monitor_name: &MonitorName,
540         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
541                 let outpoint: OutPoint = monitor_name.try_into()?;
542                 let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
543                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
544                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
545                         monitor_name.as_str(),
546                 )?);
547                 // Discard the sentinel bytes if found.
548                 if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
549                         monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
550                 }
551                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
552                         &mut monitor_cursor,
553                         (&*self.entropy_source, &*self.signer_provider),
554                 ) {
555                         Ok((blockhash, channel_monitor)) => {
556                                 if channel_monitor.get_funding_txo().0.txid != outpoint.txid
557                                         || channel_monitor.get_funding_txo().0.index != outpoint.index
558                                 {
559                                         log_error!(
560                                                 self.logger,
561                                                 "ChannelMonitor {} was stored under the wrong key!",
562                                                 monitor_name.as_str()
563                                         );
564                                         Err(io::Error::new(
565                                                 io::ErrorKind::InvalidData,
566                                                 "ChannelMonitor was stored under the wrong key",
567                                         ))
568                                 } else {
569                                         Ok((blockhash, channel_monitor))
570                                 }
571                         }
572                         Err(e) => {
573                                 log_error!(
574                                         self.logger,
575                                         "Failed to read ChannelMonitor {}, reason: {}",
576                                         monitor_name.as_str(),
577                                         e,
578                                 );
579                                 Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
580                         }
581                 }
582         }
583
584         /// Read a channel monitor update.
585         fn read_monitor_update(
586                 &self, monitor_name: &MonitorName, update_name: &UpdateName,
587         ) -> Result<ChannelMonitorUpdate, io::Error> {
588                 let update_bytes = self.kv_store.read(
589                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
590                         monitor_name.as_str(),
591                         update_name.as_str(),
592                 )?;
593                 ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
594                         log_error!(
595                                 self.logger,
596                                 "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
597                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
598                                 monitor_name.as_str(),
599                                 update_name.as_str(),
600                                 e,
601                         );
602                         io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
603                 })
604         }
605
606         /// Cleans up stale updates for all monitors.
607         ///
608         /// This function works by first listing all monitors, and then for each of them, listing all
609 	/// updates. The updates that have an `update_id` less than or equal to that of the stored monitor
610         /// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
611         /// be passed to [`KVStore::remove`].
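	///
	/// A hedged sketch of a maintenance call, e.g. after startup (using non-lazy deletion so
	/// removals are not deferred by the backend):
	///
	/// ```ignore
	/// // `persister` is a `MonitorUpdatingPersister` defined elsewhere in your application.
	/// persister.cleanup_stale_updates(false)?;
	/// ```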
612         pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
613                 let monitor_keys = self.kv_store.list(
614                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
615                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
616                 )?;
617                 for monitor_key in monitor_keys {
618                         let monitor_name = MonitorName::new(monitor_key)?;
619                         let (_, current_monitor) = self.read_monitor(&monitor_name)?;
620                         let updates = self
621                                 .kv_store
622                                 .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?;
623                         for update in updates {
624                                 let update_name = UpdateName::new(update)?;
625                                 // if the update_id is lower than the stored monitor, delete
626                                 if update_name.0 <= current_monitor.get_latest_update_id() {
627                                         self.kv_store.remove(
628                                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
629                                                 monitor_name.as_str(),
630                                                 update_name.as_str(),
631                                                 lazy,
632                                         )?;
633                                 }
634                         }
635                 }
636                 Ok(())
637         }
638 }
639
640 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
641         Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
642 where
643         K::Target: KVStore,
644         L::Target: Logger,
645         ES::Target: EntropySource + Sized,
646         SP::Target: SignerProvider + Sized,
647 {
648         /// Persists a new channel. This means writing the entire monitor to the
649         /// parametrized [`KVStore`].
650         fn persist_new_channel(
651                 &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
652                 _monitor_update_call_id: MonitorUpdateId,
653         ) -> chain::ChannelMonitorUpdateStatus {
654                 // Determine the proper key for this monitor
655                 let monitor_name = MonitorName::from(funding_txo);
656                 // Serialize and write the new monitor
657                 let mut monitor_bytes = Vec::with_capacity(
658                         MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
659                 );
660                 monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
661                 monitor.write(&mut monitor_bytes).unwrap();
662                 match self.kv_store.write(
663                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
664                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
665                         monitor_name.as_str(),
666                         &monitor_bytes,
667                 ) {
668                         Ok(_) => {
669                                 chain::ChannelMonitorUpdateStatus::Completed
670                         }
671                         Err(e) => {
672                                 log_error!(
673                                         self.logger,
674                                         "Failed to write ChannelMonitor {}/{}/{} reason: {}",
675                                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
676                                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
677                                         monitor_name.as_str(),
678                                         e
679                                 );
680                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
681                         }
682                 }
683         }
684
685 	/// Persists a channel update, writing only the update to the parametrized [`KVStore`] if possible.
686         ///
687         /// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
688         ///
689         ///   - No full monitor is found in [`KVStore`]
690         ///   - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
691         ///   - LDK commands re-persisting the entire monitor through this function, specifically when
692         ///     `update` is `None`.
693         ///   - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
694         fn update_persisted_channel(
695                 &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
696                 monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
697         ) -> chain::ChannelMonitorUpdateStatus {
698                 // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
699                 // ChannelMonitorUpdate's update_id.
700                 if let Some(update) = update {
701                         if update.update_id != CLOSED_CHANNEL_UPDATE_ID
702                                 && update.update_id % self.maximum_pending_updates != 0
703                         {
704                                 let monitor_name = MonitorName::from(funding_txo);
705                                 let update_name = UpdateName::from(update.update_id);
706                                 match self.kv_store.write(
707                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
708                                         monitor_name.as_str(),
709                                         update_name.as_str(),
710                                         &update.encode(),
711                                 ) {
712                                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
713                                         Err(e) => {
714                                                 log_error!(
715                                                         self.logger,
716                                                         "Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
717                                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
718                                                         monitor_name.as_str(),
719                                                         update_name.as_str(),
720                                                         e
721                                                 );
722                                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
723                                         }
724                                 }
725                         } else {
726                                 let monitor_name = MonitorName::from(funding_txo);
727                                 // In case of channel-close monitor update, we need to read old monitor before persisting
728                                 // the new one in order to determine the cleanup range.
729                                 let maybe_old_monitor = match monitor.get_latest_update_id() {
730                                         CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
731                                         _ => None
732                                 };
733
734 				// We could write this update, but it meets the criteria of our design that call for a full monitor write.
735                                 let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
736
737                                 if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
738                                         let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
739 						// If there is an error while reading the old monitor, we skip the cleanup.
740                                                 maybe_old_monitor.map(|(_, ref old_monitor)| {
741                                                         let start = old_monitor.get_latest_update_id();
742                                                         // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
743                                                         let end = cmp::min(
744                                                                 start.saturating_add(self.maximum_pending_updates),
745                                                                 CLOSED_CHANNEL_UPDATE_ID - 1,
746                                                         );
747                                                         (start, end)
748                                                 })
749                                         } else {
750                                                 let end = monitor.get_latest_update_id();
751                                                 let start = end.saturating_sub(self.maximum_pending_updates);
752                                                 Some((start, end))
753                                         };
754
755                                         if let Some((start, end)) = cleanup_range {
756                                                 self.cleanup_in_range(monitor_name, start, end);
757                                         }
758                                 }
759
760                                 monitor_update_status
761                         }
762                 } else {
763                         // There is no update given, so we must persist a new monitor.
764                         self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
765                 }
766         }
767
768         fn archive_persisted_channel(&self, funding_txo: OutPoint) {
769                 let monitor_name = MonitorName::from(funding_txo);
770                 let monitor = match self.read_monitor(&monitor_name) {
771                         Ok((_block_hash, monitor)) => monitor,
772                         Err(_) => return
773                 };
774                 match self.kv_store.write(
775                         ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
776                         ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
777                         monitor_name.as_str(),
778                         &monitor.encode()
779                 ) {
780                         Ok(()) => {},
781                         Err(_e) => return,
782                 };
783                 let _ = self.kv_store.remove(
784                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
785                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
786                         monitor_name.as_str(),
787                         true,
788                 );
789         }
790 }
791
792 impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
793 where
794         ES::Target: EntropySource + Sized,
795         K::Target: KVStore,
796         L::Target: Logger,
797         SP::Target: SignerProvider + Sized
798 {
799 	// Cleans up monitor updates for the given monitor in the range `start..=end`.
800         fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
801                 for update_id in start..=end {
802                         let update_name = UpdateName::from(update_id);
803                         if let Err(e) = self.kv_store.remove(
804                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
805                                 monitor_name.as_str(),
806                                 update_name.as_str(),
807                                 true,
808                         ) {
809                                 log_error!(
810                                         self.logger,
811                                         "Failed to clean up channel monitor updates for monitor {}, reason: {}",
812                                         monitor_name.as_str(),
813                                         e
814                                 );
815                         };
816                 }
817         }
818 }
819
820 /// A struct representing a name for a monitor.
821 #[derive(Debug)]
822 struct MonitorName(String);
823
824 impl MonitorName {
825         /// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
826         /// be formed from the given `name`.
827         pub fn new(name: String) -> Result<Self, io::Error> {
828                 MonitorName::do_try_into_outpoint(&name)?;
829                 Ok(Self(name))
830         }
831         /// Convert this monitor name to a str.
832         pub fn as_str(&self) -> &str {
833                 &self.0
834         }
835         /// Attempt to form a valid [`OutPoint`] from a given name string.
836         fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
837                 let mut parts = name.splitn(2, '_');
838                 let txid = if let Some(part) = parts.next() {
839                         Txid::from_str(part).map_err(|_| {
840                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
841                         })?
842                 } else {
843                         return Err(io::Error::new(
844                                 io::ErrorKind::InvalidData,
845                                 "Stored monitor key is not a splittable string",
846                         ));
847                 };
848                 let index = if let Some(part) = parts.next() {
849                         part.parse().map_err(|_| {
850                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
851                         })?
852                 } else {
853                         return Err(io::Error::new(
854                                 io::ErrorKind::InvalidData,
855                                 "No tx index value found after underscore in stored key",
856                         ));
857                 };
858                 Ok(OutPoint { txid, index })
859         }
860 }
861
862 impl TryFrom<&MonitorName> for OutPoint {
863         type Error = io::Error;
864
865         fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
866                 MonitorName::do_try_into_outpoint(&value.0)
867         }
868 }
869
870 impl From<OutPoint> for MonitorName {
871         fn from(value: OutPoint) -> Self {
872                 MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
873         }
874 }
875
876 /// A struct representing a name for an update.
877 #[derive(Debug)]
878 struct UpdateName(u64, String);
879
880 impl UpdateName {
881         /// Constructs an [`UpdateName`], after verifying that an update sequence ID
882         /// can be derived from the given `name`.
883         pub fn new(name: String) -> Result<Self, io::Error> {
884                 match name.parse::<u64>() {
885                         Ok(u) => Ok(u.into()),
886                         Err(_) => {
887                                 Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
888                         }
889                 }
890         }
891
892         /// Convert this monitor update name to a &str
893         pub fn as_str(&self) -> &str {
894                 &self.1
895         }
896 }
897
898 impl From<u64> for UpdateName {
899         fn from(value: u64) -> Self {
900                 Self(value, value.to_string())
901         }
902 }
903
904 #[cfg(test)]
905 mod tests {
906         use super::*;
907         use crate::chain::ChannelMonitorUpdateStatus;
908         use crate::events::{ClosureReason, MessageSendEventsProvider};
909         use crate::ln::functional_test_utils::*;
910         use crate::util::test_utils::{self, TestLogger, TestStore};
911         use crate::{check_added_monitors, check_closed_broadcast};
912         use crate::sync::Arc;
913         use crate::util::test_channel_signer::TestChannelSigner;
914
915         const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
916
917         #[test]
918         fn converts_u64_to_update_name() {
919                 assert_eq!(UpdateName::from(0).as_str(), "0");
920                 assert_eq!(UpdateName::from(21).as_str(), "21");
921                 assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
922         }
923
924         #[test]
925         fn bad_update_name_fails() {
926                 assert!(UpdateName::new("deadbeef".to_string()).is_err());
927                 assert!(UpdateName::new("-1".to_string()).is_err());
928         }
929
930         #[test]
931         fn monitor_from_outpoint_works() {
932                 let monitor_name1 = MonitorName::from(OutPoint {
933                         txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
934                         index: 1,
935                 });
936                 assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
937
938                 let monitor_name2 = MonitorName::from(OutPoint {
939                         txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
940                         index: u16::MAX,
941                 });
942                 assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
943         }
944
945         #[test]
946         fn bad_monitor_string_fails() {
947                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
948                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
949                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
950         }
951
952         // Exercise the `MonitorUpdatingPersister` with real channels and payments.
953         #[test]
954         fn persister_with_real_monitors() {
955                 // This value is used later to limit how many iterations we perform.
956                 let persister_0_max_pending_updates = 7;
957                 // Intentionally set this to a smaller value to test a different alignment.
958                 let persister_1_max_pending_updates = 3;
		let chanmon_cfgs = create_chanmon_cfgs(4);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: persister_0_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: persister_1_max_pending_updates,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
		let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
			&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_0.len(), 0);
		let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
			&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_1.len(), 0);

		// Helper to make sure the channel is on the expected update ID.
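		// It re-reads every monitor from each persister, verifies the latest update ID,
		// and checks that no `ChannelMonitorUpdate`s remain on disk whenever a monitor
		// was just consolidated into a full write.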
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
					&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
				// check that we stored only one monitor
				assert_eq!(persisted_chan_data_0.len(), 1);
				for (_, mon) in persisted_chan_data_0.iter() {
					// check that when we read it, we got the right update id
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);

					// if the monitor is at the consolidation threshold, ensure no updates are stored.
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
							|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
						assert_eq!(
							persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 0"
						);
					}
				}
				persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
					&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
				assert_eq!(persisted_chan_data_1.len(), 1);
				for (_, mon) in persisted_chan_data_1.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					// if the monitor is at the consolidation threshold, ensure no updates are stored.
					if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
							|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
						assert_eq!(
							persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 1"
						);
					}
				}
			};
		}

		// Create an initial channel and check that a channel was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
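		// Each payment (send plus claim) advances the monitor's update ID by
		// `EXPECTED_UPDATES_PER_PAYMENT` on both nodes, hence the multiples below.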
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
		check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);

		// Send a few more payments to try all the alignments of max pending updates with
		// updates for a payment sent and received.
		let mut sender = 0;
		for i in 3..=persister_0_max_pending_updates * 2 {
			let receiver;
			if sender == 0 {
				sender = 1;
				receiver = 0;
			} else {
				sender = 0;
				receiver = 1;
			}
			send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
			check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
		}

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();

		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(node_txn.len(), 1);

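		// The only broadcast transaction is node 0's commitment transaction; mining it
		// on node 1's chain closes the channel there as well.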
		connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));

		check_closed_broadcast!(nodes[1], true);
		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);

		// Make sure the expected number of stale updates is present.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		// The channel should have 0 updates, as it wrote a full monitor and consolidated.
		assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
		assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
	}

	// Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to persist a
	// monitor or update with it results in the persister returning an UnrecoverableError status.
	#[test]
	fn unrecoverable_error_on_write_failure() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
		{
			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
			let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
			let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
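			// A dummy funding outpoint; it only needs to be a well-formed `OutPoint`
			// for the persister to derive a storage key from it.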
			let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

			let ro_persister = MonitorUpdatingPersister {
				kv_store: &TestStore::new(true),
				logger: &TestLogger::new(),
				maximum_pending_updates: 11,
				entropy_source: node_cfgs[0].keys_manager,
				signer_provider: node_cfgs[0].keys_manager,
			};
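			// `TestStore::new(true)` creates a read-only store, so every write fails and
			// both persist calls below must report `UnrecoverableError`.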
			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// correct result
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting new channel when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// correct result
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting channel update when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			added_monitors.clear();
		}
		nodes[1].node.get_and_clear_pending_msg_events();
	}

	// Confirm that the `clean_stale_updates` function finds and deletes stale updates.
	#[test]
	fn clean_stale_updates_works() {
		let test_max_pending_updates = 7;
		let chanmon_cfgs = create_chanmon_cfgs(3);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data.len(), 0);

		// Create an initial channel.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);

		// Send a few payments to advance the updates a bit.
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);

		// Get the monitor and write a fake stale update at update_id=1 (the lowest
		// update_id an update can have).
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup
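		// `false` asks `cleanup_stale_updates` to remove the entries non-lazily, so the
		// removal is immediately visible to the reads below.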
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
			.is_err());

		// Force close.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		// Write an update near u64::MAX
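		// (`CLOSED_CHANNEL_UPDATE_ID` is `u64::MAX`, so this update is still older than
		// the closed monitor and must be treated as stale.)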
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
			.is_err());
	}

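	// Compile-time helper: this only type-checks if `P` can be used as a `Persist`
	// implementation, which is what the trait-object test below relies on.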
	fn persist_fn<P: Deref, ChannelSigner: WriteableEcdsaChannelSigner>(_persist: P) -> bool where P::Target: Persist<ChannelSigner> {
		true
	}

	#[test]
	fn kvstore_trait_object_usage() {
		let store: Arc<dyn KVStore + Send + Sync> = Arc::new(TestStore::new(false));
		assert!(persist_fn::<_, TestChannelSigner>(store.clone()));
	}
}