lightning/src/util/persist.rs
1 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
2 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
3 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
4 // You may not use this file except in accordance with one or both of these
5 // licenses.
6
7 //! This module contains a simple key-value store trait [`KVStore`] that
8 //! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
9 //! and [`ChannelMonitor`] all in one place.
10 //!
11 //! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
12
13 use core::cmp;
14 use core::convert::{TryFrom, TryInto};
15 use core::ops::Deref;
16 use core::str::FromStr;
17 use bitcoin::{BlockHash, Txid};
18
19 use crate::{io, log_error};
20 use crate::alloc::string::ToString;
21 use crate::prelude::*;
22
23 use crate::chain;
24 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
25 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
26 use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
27 use crate::chain::transaction::OutPoint;
28 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
29 use crate::ln::channelmanager::AChannelManager;
30 use crate::routing::gossip::NetworkGraph;
31 use crate::routing::scoring::WriteableScore;
32 use crate::util::logger::Logger;
33 use crate::util::ser::{Readable, ReadableArgs, Writeable};
34
35 /// The alphabet of characters allowed for namespaces and keys.
36 pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
37
38 /// The maximum number of characters namespaces and keys may have.
39 pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
40
41 /// The primary namespace under which the [`ChannelManager`] will be persisted.
42 ///
43 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
44 pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
45 /// The secondary namespace under which the [`ChannelManager`] will be persisted.
46 ///
47 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
48 pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
49 /// The key under which the [`ChannelManager`] will be persisted.
50 ///
51 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
52 pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
53
54 /// The primary namespace under which [`ChannelMonitor`]s will be persisted.
55 pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
56 /// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
57 pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
58 /// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
59 pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
60
61 /// The primary namespace under which the [`NetworkGraph`] will be persisted.
62 pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
63 /// The secondary namespace under which the [`NetworkGraph`] will be persisted.
64 pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
65 /// The key under which the [`NetworkGraph`] will be persisted.
66 pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
67
68 /// The primary namespace under which the [`WriteableScore`] will be persisted.
69 pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
70 /// The secondary namespace under which the [`WriteableScore`] will be persisted.
71 pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
72 /// The key under which the [`WriteableScore`] will be persisted.
73 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
74
75 /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
76 ///
77 /// This serves to prevent someone from accidentally loading such monitors (which may need
78 /// updates applied to be current) with another implementation.
79 pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
80
81 /// Provides an interface that allows storage and retrieval of persisted values that are associated
82 /// with given keys.
83 ///
84 /// In order to avoid collisions, the key space is segmented based on the given `primary_namespace`s
85 /// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
86 /// ways, as long as per-namespace key uniqueness is asserted.
87 ///
88 /// Keys and namespaces are required to be valid ASCII strings in the range of
89 /// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
90 /// primary namespaces and secondary namespaces (`""`) are assumed to be valid; however, if
91 /// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
92 /// that concerns should always be separated by primary namespace first, before secondary
93 /// namespaces are used. While the number of primary namespaces will be relatively small and is
94 /// determined at compile time, there may be many secondary namespaces per primary namespace. Note
95 /// that per-namespace uniqueness also needs to hold for keys *and* namespaces in any given
96 /// namespace, i.e., conflicts between keys and equally named primary or secondary namespaces
97 /// must be avoided.
98 ///
99 /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
100 /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
101 /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
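///
/// # Example
///
/// A minimal sketch of an in-memory implementation, for illustration only (`HashMapStore` is a
/// hypothetical type; a real backend would write to durable storage):
///
/// ```ignore
/// use std::collections::HashMap;
/// use std::sync::Mutex;
///
/// struct HashMapStore {
///     // Maps (primary_namespace, secondary_namespace) -> key -> value.
///     entries: Mutex<HashMap<(String, String), HashMap<String, Vec<u8>>>>,
/// }
///
/// impl KVStore for HashMapStore {
///     fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str)
///         -> Result<Vec<u8>, io::Error>
///     {
///         self.entries.lock().unwrap()
///             .get(&(primary_namespace.to_owned(), secondary_namespace.to_owned()))
///             .and_then(|ns| ns.get(key).cloned())
///             // Missing keys must map to `NotFound`, per the trait contract.
///             .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
///     }
///     fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8])
///         -> Result<(), io::Error>
///     {
///         self.entries.lock().unwrap()
///             .entry((primary_namespace.to_owned(), secondary_namespace.to_owned()))
///             .or_default()
///             .insert(key.to_owned(), buf.to_vec());
///         Ok(())
///     }
///     fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool)
///         -> Result<(), io::Error>
///     {
///         let mut entries = self.entries.lock().unwrap();
///         if let Some(ns) = entries.get_mut(&(primary_namespace.to_owned(), secondary_namespace.to_owned())) {
///             // Removing an absent key is still a success, per the trait contract.
///             ns.remove(key);
///         }
///         Ok(())
///     }
///     fn list(&self, primary_namespace: &str, secondary_namespace: &str)
///         -> Result<Vec<String>, io::Error>
///     {
///         Ok(self.entries.lock().unwrap()
///             .get(&(primary_namespace.to_owned(), secondary_namespace.to_owned()))
///             .map(|ns| ns.keys().cloned().collect())
///             // Unknown namespaces yield an empty list rather than an error.
///             .unwrap_or_default())
///     }
/// }
/// ```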
102 pub trait KVStore {
103         /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
104         /// `key`.
105         ///
106         /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
107         /// `primary_namespace` and `secondary_namespace`.
108         ///
109         /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
110         fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
111         /// Persists the given data under the given `key`.
112         ///
113         /// Will create the given `primary_namespace` and `secondary_namespace` if not already present
114         /// in the store.
115         fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
116         /// Removes any data that had previously been persisted under the given `key`.
117         ///
118         /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
119         /// remove the given `key` at some point in time after the method returns, e.g., as part of an
120         /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
121         /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
122         ///
123         /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
124         /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
125         /// potentially get lost on crash after the method returns. Therefore, this flag should only be
126         /// set for `remove` operations that can be safely replayed at a later time.
127         ///
128         /// Returns successfully if no data will be stored for the given `primary_namespace`,
129         /// `secondary_namespace`, and `key`, independently of whether it was present before its
130         /// invocation or not.
131         fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
132         /// Returns a list of keys that are stored under the given `secondary_namespace` in
133         /// `primary_namespace`.
134         ///
135         /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
136         /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
137         fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
138 }
139
140 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
141 ///
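/// Because of the blanket implementations below, any [`KVStore`] can be used as a `Persister`
/// directly. A hedged usage sketch, assuming `store`, `channel_manager`, `network_graph`, and
/// `scorer` values in the caller's scope:
///
/// ```ignore
/// // Each call serializes the object and writes it under the corresponding
/// // namespace/key constants defined above.
/// store.persist_manager(&channel_manager)?;
/// store.persist_graph(&network_graph)?;
/// store.persist_scorer(&scorer)?;
/// ```
///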
142 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
143 pub trait Persister<'a, CM: Deref, L: Deref, S: WriteableScore<'a>>
144 where
145         CM::Target: 'static + AChannelManager,
146         L::Target: 'static + Logger,
147 {
148         /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
149         ///
150         /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
151         fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error>;
152
153         /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
154         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
155
156         /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
157         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
158 }
159
160
161 impl<'a, A: KVStore, CM: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, CM, L, S> for A
162 where
163         CM::Target: 'static + AChannelManager,
164         L::Target: 'static + Logger,
165 {
166         fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> {
167                 self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
168                         CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
169                         CHANNEL_MANAGER_PERSISTENCE_KEY,
170                         &channel_manager.get_cm().encode())
171         }
172
173         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
174                 self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
175                         NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
176                         NETWORK_GRAPH_PERSISTENCE_KEY,
177                         &network_graph.encode())
178         }
179
180         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
181                 self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
182                         SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
183                         SCORER_PERSISTENCE_KEY,
184                         &scorer.encode())
185         }
186 }
187
188 impl<'a, CM: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, CM, L, S> for dyn KVStore + Send + Sync
189 where
190         CM::Target: 'static + AChannelManager,
191         L::Target: 'static + Logger,
192 {
193         fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> {
194                 self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
195                         CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
196                         CHANNEL_MANAGER_PERSISTENCE_KEY,
197                         &channel_manager.get_cm().encode())
198         }
199
200         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
201                 self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
202                         NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
203                         NETWORK_GRAPH_PERSISTENCE_KEY,
204                         &network_graph.encode())
205         }
206
207         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
208                 self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
209                         SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
210                         SCORER_PERSISTENCE_KEY,
211                         &scorer.encode())
212         }
213 }
214
215 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
216         // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
217         // down once these start returning failure.
218         // Then we should return InProgress rather than UnrecoverableError, implying we should probably
219         // just shut down the node since we're not retrying persistence!
220
221         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
222                 let key = format!("{}_{}", funding_txo.txid, funding_txo.index);
223                 match self.write(
224                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
225                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
226                         &key, &monitor.encode())
227                 {
228                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
229                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
230                 }
231         }
232
233         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
234                 let key = format!("{}_{}", funding_txo.txid, funding_txo.index);
235                 match self.write(
236                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
237                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
238                         &key, &monitor.encode())
239                 {
240                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
241                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
242                 }
243         }
244 }
245
246 impl<ChannelSigner: WriteableEcdsaChannelSigner> Persist<ChannelSigner> for dyn KVStore + Send + Sync {
247         // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
248         // down once these start returning failure.
249         // Then we should return InProgress rather than UnrecoverableError, implying we should probably
250         // just shut down the node since we're not retrying persistence!
251
252         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
253                 let key = format!("{}_{}", funding_txo.txid, funding_txo.index);
254                 match self.write(
255                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
256                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
257                         &key, &monitor.encode())
258                 {
259                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
260                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
261                 }
262         }
263
264         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
265                 let key = format!("{}_{}", funding_txo.txid, funding_txo.index);
266                 match self.write(
267                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
268                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
269                         &key, &monitor.encode())
270                 {
271                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
272                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
273                 }
274         }
275 }
276
277 /// Read previously persisted [`ChannelMonitor`]s from the store.
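///
/// For illustration, a typical startup call might look as follows (`kv_store` and `keys_manager`
/// are assumed values; in LDK, `KeysManager` implements both [`EntropySource`] and
/// [`SignerProvider`]):
///
/// ```ignore
/// let monitors = read_channel_monitors(&kv_store, &keys_manager, &keys_manager)?;
/// for (best_block_hash, monitor) in monitors {
///     // Hand each monitor back to chain monitoring on startup.
/// }
/// ```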
278 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
279         kv_store: K, entropy_source: ES, signer_provider: SP,
280 ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
281 where
282         K::Target: KVStore,
283         ES::Target: EntropySource + Sized,
284         SP::Target: SignerProvider + Sized,
285 {
286         let mut res = Vec::new();
287
288         for stored_key in kv_store.list(
289                 CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)?
290         {
291                 if stored_key.len() < 66 {
292                         return Err(io::Error::new(
293                                 io::ErrorKind::InvalidData,
294                                 "Stored key has invalid length"));
295                 }
296
297                 let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
298                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
299                 })?;
300
301                 let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
302                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
303                 })?;
304
305                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
306                         &mut io::Cursor::new(
307                                 kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
308                         (&*entropy_source, &*signer_provider),
309                 ) {
310                         Ok((block_hash, channel_monitor)) => {
311                                 if channel_monitor.get_funding_txo().0.txid != txid
312                                         || channel_monitor.get_funding_txo().0.index != index
313                                 {
314                                         return Err(io::Error::new(
315                                                 io::ErrorKind::InvalidData,
316                                                 "ChannelMonitor was stored under the wrong key",
317                                         ));
318                                 }
319                                 res.push((block_hash, channel_monitor));
320                         }
321                         Err(_) => {
322                                 return Err(io::Error::new(
323                                         io::ErrorKind::InvalidData,
324                                         "Failed to read ChannelMonitor"
325                                 ))
326                         }
327                 }
328         }
329         Ok(res)
330 }
331
332 /// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
333 /// [`ChannelMonitorUpdate`]s.
334 ///
335 /// # Overview
336 ///
337 /// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
338 /// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
339 /// deleting) and complexity. This is because it writes channel monitor differential updates,
340 /// whereas the other (default) implementation rewrites the entire monitor on each update. For
341 /// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
342 /// of megabytes (or more). Updates can be as small as a few hundred bytes.
343 ///
344 /// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
345 /// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
346 /// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
347 /// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
348 /// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
349 /// sentinel bytes.
350 ///
351 /// # Storing monitors
352 ///
353 /// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
354 ///
355 ///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
356 ///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`].
357 ///
358 /// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`],
359 /// using the familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
360 ///
361 /// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
362 ///
363 ///   - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`]
364 ///   - secondary namespace: [the monitor's encoded outpoint name]
365 ///
366 /// Under that secondary namespace, each update is stored with a number string, like `21`, which
367 /// represents its `update_id` value.
368 ///
369 /// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
370 ///
371 ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
372 ///   - Index: `1`
373 ///
374 /// Full channel monitors would be stored at a single key:
375 ///
376 /// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
377 ///
378 /// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
379 ///
380 /// ```text
381 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
382 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
383 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
384 /// ```
385 /// ... and so on.
386 ///
387 /// # Reading channel state from storage
388 ///
389 /// Channel state can be reconstructed by calling
390 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
391 /// list channel monitors themselves and load channels individually using
392 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
393 ///
394 /// ## EXTREMELY IMPORTANT
395 ///
396 /// It is extremely important that your [`KVStore::read`] implementation uses the
397 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
398 /// that circumstance (not when there is really a permissions error, for example). This is because
399 /// neither channel monitor reading function lists updates. Instead, each reads the monitor and,
400 /// using its stored `update_id`, synthesizes update storage keys and tries them in sequence until
401 /// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
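///
/// For example, a file-backed [`KVStore::read`] can usually rely on the standard library's error
/// mapping (a sketch; `build_path` is a hypothetical helper joining the namespaces and key):
///
/// ```ignore
/// fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str)
///     -> Result<Vec<u8>, io::Error>
/// {
///     let path = self.build_path(primary_namespace, secondary_namespace, key);
///     // `std::fs::read` returns `ErrorKind::NotFound` only when the file is absent;
///     // permission and other I/O errors keep their original kinds and bubble up.
///     std::fs::read(&path)
/// }
/// ```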
402 ///
403 /// # Pruning stale channel updates
404 ///
405 /// Stale updates are pruned when the consolidation threshold given by `maximum_pending_updates`
406 /// is reached. Monitor updates in the range between the latest `update_id` and
407 /// `update_id - maximum_pending_updates` are deleted.
408 /// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the
409 /// deletions will complete. However, stale updates are not a problem for data integrity, since
410 /// only updates with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
411 ///
412 /// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
413 /// would like to get rid of them, consider using the
414 /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
415 pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
416 where
417         K::Target: KVStore,
418         L::Target: Logger,
419         ES::Target: EntropySource + Sized,
420         SP::Target: SignerProvider + Sized,
421 {
422         kv_store: K,
423         logger: L,
424         maximum_pending_updates: u64,
425         entropy_source: ES,
426         signer_provider: SP,
427 }
428
429 #[allow(dead_code)]
430 impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
431         MonitorUpdatingPersister<K, L, ES, SP>
432 where
433         K::Target: KVStore,
434         L::Target: Logger,
435         ES::Target: EntropySource + Sized,
436         SP::Target: SignerProvider + Sized,
437 {
438         /// Constructs a new [`MonitorUpdatingPersister`].
439         ///
440         /// The `maximum_pending_updates` parameter controls how many updates may be stored before a
441         /// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
442         /// consolidation will frequently occur with fewer updates than what you set here; this number
443         /// is merely the maximum that may be stored. When setting this value, consider that for higher
444         /// values of `maximum_pending_updates`:
445         ///
446         ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
447         /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
448         /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
449         ///   - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
450         /// "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
451         /// less frequent "waves."
452         ///   - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
453         /// [`MonitorUpdatingPersister::cleanup_stale_updates`].
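        ///
        /// A construction sketch under assumed values (`kv_store`, `logger`, and `keys_manager`
        /// are provided by the caller; `keys_manager` serves as both entropy source and signer
        /// provider here):
        ///
        /// ```ignore
        /// let persister = MonitorUpdatingPersister::new(
        ///     kv_store,
        ///     logger,
        ///     100, // maximum_pending_updates: consolidate after at most 100 updates
        ///     &keys_manager,
        ///     &keys_manager,
        /// );
        /// ```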
454         pub fn new(
455                 kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
456                 signer_provider: SP,
457         ) -> Self {
458                 MonitorUpdatingPersister {
459                         kv_store,
460                         logger,
461                         maximum_pending_updates,
462                         entropy_source,
463                         signer_provider,
464                 }
465         }
466
467         /// Reads all stored channel monitors, along with any stored updates for them.
468         ///
469         /// It is extremely important that your [`KVStore::read`] implementation uses the
470         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
471         /// documentation for [`MonitorUpdatingPersister`].
472         pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
473                 &self, broadcaster: &B, fee_estimator: &F,
474         ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
475         where
476                 B::Target: BroadcasterInterface,
477                 F::Target: FeeEstimator,
478         {
479                 let monitor_list = self.kv_store.list(
480                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
481                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
482                 )?;
483                 let mut res = Vec::with_capacity(monitor_list.len());
484                 for monitor_key in monitor_list {
485                         res.push(self.read_channel_monitor_with_updates(
486                                 broadcaster,
487                                 fee_estimator,
488                                 monitor_key,
489                         )?)
490                 }
491                 Ok(res)
492         }
493
494         /// Read a single channel monitor, along with any stored updates for it.
495         ///
496         /// It is extremely important that your [`KVStore::read`] implementation uses the
497         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
498         /// documentation for [`MonitorUpdatingPersister`].
499         ///
500         /// For `monitor_key`, channel storage keys are composed of the channel's transaction ID and
501         /// index, or [`OutPoint`], with an underscore `_` between them. For example, given:
502         ///
503         ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
504         ///   - Index: `1`
505         ///
506         /// The correct `monitor_key` would be:
507         /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
508         ///
509         /// Loading a large number of monitors will be faster if done in parallel. You can use this
510         /// function to accomplish this. Take care to limit the number of parallel readers.
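        ///
        /// An illustrative call, assuming `persister`, `broadcaster`, and `fee_estimator` values
        /// in scope:
        ///
        /// ```ignore
        /// let key = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1".to_string();
        /// let (best_block_hash, monitor) =
        ///     persister.read_channel_monitor_with_updates(&broadcaster, &fee_estimator, key)?;
        /// ```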
511         pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
512                 &self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
513         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
514         where
515                 B::Target: BroadcasterInterface,
516                 F::Target: FeeEstimator,
517         {
518                 let monitor_name = MonitorName::new(monitor_key)?;
519                 let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
520                 let mut current_update_id = monitor.get_latest_update_id();
521                 loop {
522                         current_update_id = match current_update_id.checked_add(1) {
523                                 Some(next_update_id) => next_update_id,
524                                 None => break,
525                         };
526                         let update_name = UpdateName::from(current_update_id);
527                         let update = match self.read_monitor_update(&monitor_name, &update_name) {
528                                 Ok(update) => update,
529                                 Err(err) if err.kind() == io::ErrorKind::NotFound => {
530                                         // We can't find any more updates, so we are done.
531                                         break;
532                                 }
533                                 Err(err) => return Err(err),
534                         };
535
536                         monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
537                                 .map_err(|e| {
538                                         log_error!(
539                                                 self.logger,
540                                                 "Monitor update failed. monitor: {} update: {} reason: {:?}",
541                                                 monitor_name.as_str(),
542                                                 update_name.as_str(),
543                                                 e
544                                         );
545                                         io::Error::new(io::ErrorKind::Other, "Monitor update failed")
546                                 })?;
547                 }
548                 Ok((block_hash, monitor))
549         }
550
551         /// Read a channel monitor.
552         fn read_monitor(
553                 &self, monitor_name: &MonitorName,
554         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
555                 let outpoint: OutPoint = monitor_name.try_into()?;
556                 let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
557                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
558                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
559                         monitor_name.as_str(),
560                 )?);
561                 // Discard the sentinel bytes if found.
562                 if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
563                         monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
564                 }
565                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
566                         &mut monitor_cursor,
567                         (&*self.entropy_source, &*self.signer_provider),
568                 ) {
569                         Ok((blockhash, channel_monitor)) => {
570                                 if channel_monitor.get_funding_txo().0.txid != outpoint.txid
571                                         || channel_monitor.get_funding_txo().0.index != outpoint.index
572                                 {
573                                         log_error!(
574                                                 self.logger,
575                                                 "ChannelMonitor {} was stored under the wrong key!",
576                                                 monitor_name.as_str()
577                                         );
578                                         Err(io::Error::new(
579                                                 io::ErrorKind::InvalidData,
580                                                 "ChannelMonitor was stored under the wrong key",
581                                         ))
582                                 } else {
583                                         Ok((blockhash, channel_monitor))
584                                 }
585                         }
586                         Err(e) => {
587                                 log_error!(
588                                         self.logger,
589                                         "Failed to read ChannelMonitor {}, reason: {}",
590                                         monitor_name.as_str(),
591                                         e,
592                                 );
593                                 Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
594                         }
595                 }
596         }
597
598         /// Read a channel monitor update.
599         fn read_monitor_update(
600                 &self, monitor_name: &MonitorName, update_name: &UpdateName,
601         ) -> Result<ChannelMonitorUpdate, io::Error> {
602                 let update_bytes = self.kv_store.read(
603                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
604                         monitor_name.as_str(),
605                         update_name.as_str(),
606                 )?;
607                 ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
608                         log_error!(
609                                 self.logger,
610                                 "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
611                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
612                                 monitor_name.as_str(),
613                                 update_name.as_str(),
614                                 e,
615                         );
616                         io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
617                 })
618         }
619
620         /// Cleans up stale updates for all monitors.
621         ///
622         /// This function works by first listing all monitors, and then for each of them, listing all
623         /// updates. The updates that have an `update_id` less than or equal to the stored monitor's
624         /// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
625         /// be passed to [`KVStore::remove`].
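        ///
        /// For example, to lazily delete any stale updates left behind (such as after a crash
        /// with pending lazy deletes), assuming a `persister` value in scope:
        ///
        /// ```ignore
        /// persister.cleanup_stale_updates(true)?;
        /// ```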
626         pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
627                 let monitor_keys = self.kv_store.list(
628                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
629                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
630                 )?;
631                 for monitor_key in monitor_keys {
632                         let monitor_name = MonitorName::new(monitor_key)?;
633                         let (_, current_monitor) = self.read_monitor(&monitor_name)?;
634                         let updates = self
635                                 .kv_store
636                                 .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?;
637                         for update in updates {
638                                 let update_name = UpdateName::new(update)?;
639                         // If the update_id is lower than or equal to the stored monitor's, delete it.
640                                 if update_name.0 <= current_monitor.get_latest_update_id() {
641                                         self.kv_store.remove(
642                                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
643                                                 monitor_name.as_str(),
644                                                 update_name.as_str(),
645                                                 lazy,
646                                         )?;
647                                 }
648                         }
649                 }
650                 Ok(())
651         }
652 }
653
654 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
655         Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
656 where
657         K::Target: KVStore,
658         L::Target: Logger,
659         ES::Target: EntropySource + Sized,
660         SP::Target: SignerProvider + Sized,
661 {
662         /// Persists a new channel. This means writing the entire monitor to the
663         /// parameterized [`KVStore`].
664         fn persist_new_channel(
665                 &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
666                 _monitor_update_call_id: MonitorUpdateId,
667         ) -> chain::ChannelMonitorUpdateStatus {
668                 // Determine the proper key for this monitor
669                 let monitor_name = MonitorName::from(funding_txo);
670                 // Serialize and write the new monitor
671                 let mut monitor_bytes = Vec::with_capacity(
672                         MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
673                 );
674                 monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
675                 monitor.write(&mut monitor_bytes).unwrap();
676                 match self.kv_store.write(
677                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
678                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
679                         monitor_name.as_str(),
680                         &monitor_bytes,
681                 ) {
682                         Ok(_) => {
683                                 chain::ChannelMonitorUpdateStatus::Completed
684                         }
685                         Err(e) => {
686                                 log_error!(
687                                         self.logger,
688                                         "Failed to write ChannelMonitor {}/{}/{} reason: {}",
689                                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
690                                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
691                                         monitor_name.as_str(),
692                                         e
693                                 );
694                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
695                         }
696                 }
697         }
698
699         /// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
700         ///
701         /// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
702         ///
703         ///   - No full monitor is found in [`KVStore`]
704         ///   - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
705         ///   - LDK commands re-persisting the entire monitor through this function, specifically when
706         ///     `update` is `None`.
707         ///   - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
708         fn update_persisted_channel(
709                 &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
710                 monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
711         ) -> chain::ChannelMonitorUpdateStatus {
712                 // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
713                 // ChannelMonitorUpdate's update_id.
714                 if let Some(update) = update {
715                         if update.update_id != CLOSED_CHANNEL_UPDATE_ID
716                                 && update.update_id % self.maximum_pending_updates != 0
717                         {
718                                 let monitor_name = MonitorName::from(funding_txo);
719                                 let update_name = UpdateName::from(update.update_id);
720                                 match self.kv_store.write(
721                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
722                                         monitor_name.as_str(),
723                                         update_name.as_str(),
724                                         &update.encode(),
725                                 ) {
726                                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
727                                         Err(e) => {
728                                                 log_error!(
729                                                         self.logger,
730                                                         "Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
731                                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
732                                                         monitor_name.as_str(),
733                                                         update_name.as_str(),
734                                                         e
735                                                 );
736                                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
737                                         }
738                                 }
739                         } else {
740                                 let monitor_name = MonitorName::from(funding_txo);
741                                 // In the case of a channel-close monitor update, we need to read the old monitor
742                                 // before persisting the new one in order to determine the cleanup range.
743                                 let maybe_old_monitor = match monitor.get_latest_update_id() {
744                                         CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
745                                         _ => None
746                                 };
747
748                                 // We could write this update, but it meets the criteria of our design that call for a full monitor write.
749                                 let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
750
751                                 if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
752                                         let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
753                                                 // If there is an error while reading the old monitor, we skip cleanup.
754                                                 maybe_old_monitor.map(|(_, ref old_monitor)| {
755                                                         let start = old_monitor.get_latest_update_id();
756                                                         // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
757                                                         let end = cmp::min(
758                                                                 start.saturating_add(self.maximum_pending_updates),
759                                                                 CLOSED_CHANNEL_UPDATE_ID - 1,
760                                                         );
761                                                         (start, end)
762                                                 })
763                                         } else {
764                                                 let end = monitor.get_latest_update_id();
765                                                 let start = end.saturating_sub(self.maximum_pending_updates);
766                                                 Some((start, end))
767                                         };
768
769                                         if let Some((start, end)) = cleanup_range {
770                                                 self.cleanup_in_range(monitor_name, start, end);
771                                         }
772                                 }
773
774                                 monitor_update_status
775                         }
776                 } else {
777                         // There is no update given, so we must persist a new monitor.
778                         self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
779                 }
780         }
781 }
782
783 impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
784 where
785         ES::Target: EntropySource + Sized,
786         K::Target: KVStore,
787         L::Target: Logger,
788         SP::Target: SignerProvider + Sized
789 {
790         // Cleans up monitor updates for the given monitor in the range `start..=end`.
791         fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
792                 for update_id in start..=end {
793                         let update_name = UpdateName::from(update_id);
794                         if let Err(e) = self.kv_store.remove(
795                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
796                                 monitor_name.as_str(),
797                                 update_name.as_str(),
798                                 true,
799                         ) {
800                                 log_error!(
801                                         self.logger,
802                                         "Failed to clean up channel monitor updates for monitor {}, reason: {}",
803                                         monitor_name.as_str(),
804                                         e
805                                 );
806                         };
807                 }
808         }
809 }
810
811 /// A struct representing a name for a monitor.
812 #[derive(Debug)]
813 struct MonitorName(String);
814
815 impl MonitorName {
816         /// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
817         /// be formed from the given `name`.
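        ///
        /// For example (illustrative), a valid name is the funding transaction ID and output
        /// index joined by an underscore:
        ///
        /// ```ignore
        /// let name = MonitorName::new(
        ///     "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1".to_string(),
        /// )?;
        /// ```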
818         pub fn new(name: String) -> Result<Self, io::Error> {
819                 MonitorName::do_try_into_outpoint(&name)?;
820                 Ok(Self(name))
821         }
822         /// Convert this monitor name to a `&str`.
823         pub fn as_str(&self) -> &str {
824                 &self.0
825         }
826         /// Attempt to form a valid [`OutPoint`] from a given name string.
827         fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
828                 let mut parts = name.splitn(2, '_');
829                 let txid = if let Some(part) = parts.next() {
830                         Txid::from_str(part).map_err(|_| {
831                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
832                         })?
833                 } else {
834                         return Err(io::Error::new(
835                                 io::ErrorKind::InvalidData,
836                                 "Stored monitor key is not a splittable string",
837                         ));
838                 };
839                 let index = if let Some(part) = parts.next() {
840                         part.parse().map_err(|_| {
841                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
842                         })?
843                 } else {
844                         return Err(io::Error::new(
845                                 io::ErrorKind::InvalidData,
846                                 "No tx index value found after underscore in stored key",
847                         ));
848                 };
849                 Ok(OutPoint { txid, index })
850         }
851 }
852
853 impl TryFrom<&MonitorName> for OutPoint {
854         type Error = io::Error;
855
856         fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
857                 MonitorName::do_try_into_outpoint(&value.0)
858         }
859 }
860
861 impl From<OutPoint> for MonitorName {
862         fn from(value: OutPoint) -> Self {
863                 MonitorName(format!("{}_{}", value.txid, value.index))
864         }
865 }
866
867 /// A struct representing a name for an update.
868 #[derive(Debug)]
869 struct UpdateName(u64, String);
870
871 impl UpdateName {
872         /// Constructs an [`UpdateName`], after verifying that an update sequence ID
873         /// can be derived from the given `name`.
874         pub fn new(name: String) -> Result<Self, io::Error> {
875                 match name.parse::<u64>() {
876                         Ok(u) => Ok(u.into()),
877                         Err(_) => {
878                                 Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
879                         }
880                 }
881         }
882
883         /// Convert this monitor update name to a `&str`.
884         pub fn as_str(&self) -> &str {
885                 &self.1
886         }
887 }
888
889 impl From<u64> for UpdateName {
890         fn from(value: u64) -> Self {
891                 Self(value, value.to_string())
892         }
893 }
894
895 #[cfg(test)]
896 mod tests {
897         use super::*;
898         use crate::chain::chainmonitor::Persist;
899         use crate::chain::ChannelMonitorUpdateStatus;
900         use crate::events::{ClosureReason, MessageSendEventsProvider};
901         use crate::ln::functional_test_utils::*;
902         use crate::util::test_utils::{self, TestLogger, TestStore};
903         use crate::{check_added_monitors, check_closed_broadcast};
904
905         const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
906
907         #[test]
908         fn converts_u64_to_update_name() {
909                 assert_eq!(UpdateName::from(0).as_str(), "0");
910                 assert_eq!(UpdateName::from(21).as_str(), "21");
911                 assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
912         }
913
914         #[test]
915         fn bad_update_name_fails() {
916                 assert!(UpdateName::new("deadbeef".to_string()).is_err());
917                 assert!(UpdateName::new("-1".to_string()).is_err());
918         }
919
920         #[test]
921         fn monitor_from_outpoint_works() {
922                 let monitor_name1 = MonitorName::from(OutPoint {
923                         txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
924                         index: 1,
925                 });
926                 assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
927
928                 let monitor_name2 = MonitorName::from(OutPoint {
929                         txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
930                         index: u16::MAX,
931                 });
932                 assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
933         }
934
935         #[test]
936         fn bad_monitor_string_fails() {
937                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
938                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
939                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
940         }

	// Exercise the `MonitorUpdatingPersister` with real channels and payments.
	#[test]
	fn persister_with_real_monitors() {
		// This value is used later to limit how many iterations we perform.
		let persister_0_max_pending_updates = 7;
		// Intentionally set this to a smaller value to test a different alignment.
		let persister_1_max_pending_updates = 3;
		let chanmon_cfgs = create_chanmon_cfgs(4);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: persister_0_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: persister_1_max_pending_updates,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
		let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
			&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_0.len(), 0);
		let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
			&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_1.len(), 0);

		// Helper to make sure the channel is on the expected update ID.
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
					&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
				// Check that we stored only one monitor.
				assert_eq!(persisted_chan_data_0.len(), 1);
				for (_, mon) in persisted_chan_data_0.iter() {
					// Check that when we read it, we got the right update id.
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);

					// If the channel monitor is at the consolidation threshold,
					// ensure no updates are stored.
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
							|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
						assert_eq!(
							persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 0"
						);
					}
				}
				persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
					&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
				assert_eq!(persisted_chan_data_1.len(), 1);
				for (_, mon) in persisted_chan_data_1.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					// If the channel monitor is at the consolidation threshold,
					// ensure no updates are stored.
					if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
							|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
						assert_eq!(
							persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 1"
						);
					}
				}
			};
		}
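
		// To make the consolidation arithmetic above concrete (a worked example,
		// not an assertion from the suite): with `maximum_pending_updates = 7`, a
		// full monitor is persisted whenever `update_id % 7 == 0` or the channel
		// closes, and the update list for that monitor is emptied at those points.
		// For instance, at update_id 13 differential updates are still listed,
		// while at update_id 14 (14 % 7 == 0) the list is empty again.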

		// Create an initial channel and check that a channel was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
		check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);

		// Send a few more payments to try all alignments of the two
		// max-pending-updates values with the updates generated by payments
		// sent and received in both directions.
		let mut sender = 0;
		for i in 3..=persister_0_max_pending_updates * 2 {
			let receiver;
			if sender == 0 {
				sender = 1;
				receiver = 0;
			} else {
				sender = 0;
				receiver = 1;
			}
			send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
			check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
		}

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();

		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

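		// The force-close should have broadcast exactly one transaction: the
		// holder's latest commitment transaction.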
		let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(node_txn.len(), 1);

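		// Connect a block containing the commitment transaction to nodes[1] so it
		// sees the channel close on-chain (the dummy block carries the transaction
		// twice).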
		connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));

		check_closed_broadcast!(nodes[1], true);
		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);

		// Make sure no stale updates are left over after the close consolidated
		// the monitors.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		// The channel should have 0 updates, as it wrote a full monitor and consolidated.
		assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
		assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
	}

	// Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write,
	// trying to persist a monitor or update with it results in the persister
	// returning an UnrecoverableError status.
	#[test]
	fn unrecoverable_error_on_write_failure() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
		{
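			// Scope the chain monitor's lock guards so they are dropped before we
			// drain pending message events below.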
			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
			let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
			let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
			let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

			let ro_persister = MonitorUpdatingPersister {
				kv_store: &TestStore::new(true),
				logger: &TestLogger::new(),
				maximum_pending_updates: 11,
				entropy_source: node_cfgs[0].keys_manager,
				signer_provider: node_cfgs[0].keys_manager,
			};
			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// This is the correct result.
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting new channel when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// This is the correct result.
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting channel update when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			added_monitors.clear();
		}
		nodes[1].node.get_and_clear_pending_msg_events();
	}
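
	// A hedged sketch, not part of the original suite: the test above relies on
	// `TestStore::new(true)` producing a store whose writes fail. Assuming that
	// flag means "fail writes", the failure should already surface at the
	// `KVStore` layer, before the persister maps it to `UnrecoverableError`.
	#[test]
	fn read_only_store_write_fails_sketch() {
		let store = TestStore::new(true);
		assert!(store.write("primary", "secondary", "key", &[0u8; 1]).is_err());
	}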

	// Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
	#[test]
	fn clean_stale_updates_works() {
		let test_max_pending_updates = 7;
		let chanmon_cfgs = create_chanmon_cfgs(3);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data.len(), 0);

		// Create an initial channel.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);

		// Send a few payments to advance the updates a bit.
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);

		// Get the monitor and make a fake stale update at update_id=1 (the lowest
		// update_id an update can have).
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup.
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone.
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
			.is_err());

		// Force close.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		// Write an update near u64::MAX.
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup.
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone.
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
			.is_err());
	}
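
	// A hedged sketch, not part of the original suite: `UpdateName::from(1)` is
	// used above to derive an update's storage key, which appears to simply be
	// the decimal form of the update_id. Assuming that is the scheme:
	#[test]
	fn update_name_decimal_form_sketch() {
		assert_eq!(UpdateName::from(21).as_str(), "21");
	}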
}