Add `OutputSweeper` utility persisting and sweeping spendable outputs
[rust-lightning] / lightning / src / util / persist.rs
1 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
2 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
3 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
4 // You may not use this file except in accordance with one or both of these
5 // licenses.
6
7 //! This module contains a simple key-value store trait [`KVStore`] that
8 //! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
9 //! and [`ChannelMonitor`] all in one place.
10 //!
11 //! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
12
13 use core::cmp;
14 use core::ops::Deref;
15 use core::str::FromStr;
16 use bitcoin::{BlockHash, Txid};
17
18 use crate::{io, log_error};
19 use crate::prelude::*;
20
21 use crate::chain;
22 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
23 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
24 use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
25 use crate::chain::transaction::OutPoint;
26 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
27 use crate::ln::channelmanager::AChannelManager;
28 use crate::routing::gossip::NetworkGraph;
29 use crate::routing::scoring::WriteableScore;
30 use crate::util::logger::Logger;
31 use crate::util::ser::{Readable, ReadableArgs, Writeable};
32
33 /// The alphabet of characters allowed for namespaces and keys.
34 pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
35
36 /// The maximum number of characters namespaces and keys may have.
37 pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
38
39 /// The primary namespace under which the [`ChannelManager`] will be persisted.
40 ///
41 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
42 pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
43 /// The secondary namespace under which the [`ChannelManager`] will be persisted.
44 ///
45 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
46 pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
47 /// The key under which the [`ChannelManager`] will be persisted.
48 ///
49 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
50 pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
51
52 /// The primary namespace under which [`ChannelMonitor`]s will be persisted.
53 pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
54 /// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
55 pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
56 /// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
57 pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
58
59 /// The primary namespace under which the [`NetworkGraph`] will be persisted.
60 pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
61 /// The secondary namespace under which the [`NetworkGraph`] will be persisted.
62 pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
63 /// The key under which the [`NetworkGraph`] will be persisted.
64 pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
65
66 /// The primary namespace under which the [`WriteableScore`] will be persisted.
67 pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
68 /// The secondary namespace under which the [`WriteableScore`] will be persisted.
69 pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
70 /// The key under which the [`WriteableScore`] will be persisted.
71 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
72
73 /// The primary namespace under which [`OutputSweeper`] state will be persisted.
74 ///
75 /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
76 pub const OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
77 /// The secondary namespace under which [`OutputSweeper`] state will be persisted.
78 ///
79 /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
80 pub const OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
82 /// The key under which [`OutputSweeper`] state will be persisted.
83 ///
84 /// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
85 pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper";
86
87 /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
88 ///
89 /// This serves to prevent someone from accidentally loading such monitors (which may need
90 /// updates applied to be current) with another implementation.
91 pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
92
93 /// Provides an interface that allows storage and retrieval of persisted values that are associated
94 /// with given keys.
95 ///
96 /// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s
97 /// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
98 /// ways, as long as per-namespace key uniqueness is asserted.
99 ///
100 /// Keys and namespaces are required to be valid ASCII strings in the range of
101 /// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
102 /// primary namespaces and secondary namespaces (`""`) are assumed to be valid; however, if
103 /// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
104 /// that concerns should always be separated by primary namespace first, before secondary
105 /// namespaces are used. While the number of primary namespaces will be relatively small and is
106 /// determined at compile time, there may be many secondary namespaces per primary namespace. Note
107 /// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given
108 /// namespace, i.e., conflicts between keys and equally named
109 /// primary namespaces/secondary namespaces must be avoided.
110 ///
111 /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
112 /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
113 /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
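///
/// For illustration, a minimal usage sketch follows. It is not part of LDK; the generic
/// `store` and the `example_ns`/`example_key` names are assumptions made purely for the
/// example.
///
/// ```ignore
/// fn roundtrip<S: KVStore>(store: &S) -> Result<(), lightning::io::Error> {
///     // Write a value under a primary namespace, an empty secondary namespace, and a key.
///     store.write("example_ns", "", "example_key", &[1, 2, 3])?;
///     // Read it back; a missing key must surface as `ErrorKind::NotFound`.
///     let value = store.read("example_ns", "", "example_key")?;
///     assert_eq!(value, vec![1, 2, 3]);
///     // List all keys stored under the namespace pair.
///     let keys = store.list("example_ns", "")?;
///     assert!(keys.contains(&"example_key".to_string()));
///     // Remove eagerly (`lazy = false`) rather than deferring the deletion.
///     store.remove("example_ns", "", "example_key", false)?;
///     Ok(())
/// }
/// ```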
114 pub trait KVStore {
115         /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
116         /// `key`.
117         ///
118         /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
119         /// `primary_namespace` and `secondary_namespace`.
120         ///
121         /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
122         fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
123         /// Persists the given data under the given `key`.
124         ///
125         /// Will create the given `primary_namespace` and `secondary_namespace` if not already present
126         /// in the store.
127         fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
128         /// Removes any data that had previously been persisted under the given `key`.
129         ///
130         /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
131         /// remove the given `key` at some point in time after the method returns, e.g., as part of an
132         /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
133         /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
134         ///
135         /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
136         /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
137         /// potentially get lost on crash after the method returns. Therefore, this flag should only be
138         /// set for `remove` operations that can be safely replayed at a later time.
139         ///
140         /// Returns successfully if no data will be stored for the given `primary_namespace`,
141         /// `secondary_namespace`, and `key`, independently of whether it was present before its
142         /// invocation or not.
143         fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
144         /// Returns a list of keys that are stored under the given `secondary_namespace` in
145         /// `primary_namespace`.
146         ///
147         /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
148         /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
149         fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
150 }
151
152 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
153 ///
154 /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
155 pub trait Persister<'a, CM: Deref, L: Deref, S: WriteableScore<'a>>
156 where
157         CM::Target: 'static + AChannelManager,
158         L::Target: 'static + Logger,
159 {
160         /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
161         ///
162         /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
163         fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error>;
164
165         /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
166         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
167
168         /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
169         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
170 }
171
172
173 impl<'a, A: KVStore + ?Sized, CM: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, CM, L, S> for A
174 where
175         CM::Target: 'static + AChannelManager,
176         L::Target: 'static + Logger,
177 {
178         fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> {
179                 self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
180                         CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
181                         CHANNEL_MANAGER_PERSISTENCE_KEY,
182                         &channel_manager.get_cm().encode())
183         }
184
185         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
186                 self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
187                         NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
188                         NETWORK_GRAPH_PERSISTENCE_KEY,
189                         &network_graph.encode())
190         }
191
192         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
193                 self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
194                         SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
195                         SCORER_PERSISTENCE_KEY,
196                         &scorer.encode())
197         }
198 }
199
200 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
201         // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
202         // down once these start returning failure.
203         // Then we should return InProgress rather than UnrecoverableError, implying we should probably
204         // just shut down the node since we're not retrying persistence!
205
206         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
207                 let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
208                 match self.write(
209                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
210                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
211                         &key, &monitor.encode())
212                 {
213                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
214                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
215                 }
216         }
217
218         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
219                 let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
220                 match self.write(
221                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
222                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
223                         &key, &monitor.encode())
224                 {
225                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
226                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
227                 }
228         }
229 }
230
231 /// Read previously persisted [`ChannelMonitor`]s from the store.
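///
/// A minimal call sketch (the `kv_store` and `keys_manager` values are assumed to be the
/// user's [`KVStore`] implementation and an LDK `KeysManager` serving as both entropy source
/// and signer provider; neither is defined here):
///
/// ```ignore
/// let monitors = read_channel_monitors(&kv_store, &keys_manager, &keys_manager)?;
/// for (block_hash, monitor) in monitors {
///     // Each entry pairs the block hash the monitor last saw with the monitor itself.
///     println!("Loaded monitor for {:?} at {}", monitor.get_funding_txo().0, block_hash);
/// }
/// ```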
232 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
233         kv_store: K, entropy_source: ES, signer_provider: SP,
234 ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
235 where
236         K::Target: KVStore,
237         ES::Target: EntropySource + Sized,
238         SP::Target: SignerProvider + Sized,
239 {
240         let mut res = Vec::new();
241
242         for stored_key in kv_store.list(
243                 CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)?
244         {
245                 if stored_key.len() < 66 {
246                         return Err(io::Error::new(
247                                 io::ErrorKind::InvalidData,
248                                 "Stored key has invalid length"));
249                 }
250
251                 let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
252                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
253                 })?;
254
255                 let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
256                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
257                 })?;
258
259                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
260                         &mut io::Cursor::new(
261                                 kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
262                         (&*entropy_source, &*signer_provider),
263                 ) {
264                         Ok((block_hash, channel_monitor)) => {
265                                 if channel_monitor.get_funding_txo().0.txid != txid
266                                         || channel_monitor.get_funding_txo().0.index != index
267                                 {
268                                         return Err(io::Error::new(
269                                                 io::ErrorKind::InvalidData,
270                                                 "ChannelMonitor was stored under the wrong key",
271                                         ));
272                                 }
273                                 res.push((block_hash, channel_monitor));
274                         }
275                         Err(_) => {
276                                 return Err(io::Error::new(
277                                         io::ErrorKind::InvalidData,
278                                         "Failed to read ChannelMonitor"
279                                 ))
280                         }
281                 }
282         }
283         Ok(res)
284 }
285
286 /// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
287 /// [`ChannelMonitorUpdate`]s.
288 ///
289 /// # Overview
290 ///
291 /// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
292 /// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
293 /// deleting) and complexity. This is because it writes channel monitor differential updates,
294 /// whereas the other (default) implementation rewrites the entire monitor on each update. For
295 /// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
296 /// of megabytes (or more). Updates can be as small as a few hundred bytes.
297 ///
298 /// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
299 /// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
300 /// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
301 /// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
302 /// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
303 /// sentinel bytes.
304 ///
305 /// # Storing monitors
306 ///
307 /// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
308 ///
309 ///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
310 ///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
311 ///
312 /// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`],
313 /// using the familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
314 ///
315 /// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
316 ///
317 ///   - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`]
318 ///   - secondary namespace: [the monitor's encoded outpoint name]
319 ///
320 /// Under that secondary namespace, each update is stored with a number string, like `21`, which
321 /// represents its `update_id` value.
322 ///
323 /// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
324 ///
325 ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
326 ///   - Index: `1`
327 ///
328 /// Full channel monitors would be stored at a single key:
329 ///
330 /// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
331 ///
332 /// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
333 ///
334 /// ```text
335 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
336 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
337 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
338 /// ```
339 /// ... and so on.
340 ///
341 /// # Reading channel state from storage
342 ///
343 /// Channel state can be reconstructed by calling
344 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
345 /// list channel monitors themselves and load channels individually using
346 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
347 ///
348 /// ## EXTREMELY IMPORTANT
349 ///
350 /// It is extremely important that your [`KVStore::read`] implementation uses the
351 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
352 /// that circumstance (not when there is really a permissions error, for example). This is because
353 /// neither channel monitor reading function lists updates. Instead, each reads the monitor and,
354 /// using its stored `update_id`, synthesizes update storage keys and tries them in sequence until
355 /// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
356 ///
357 /// # Pruning stale channel updates
358 ///
359 /// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`.
360 /// Monitor updates in the range between the latest `update_id` and `update_id - maximum_pending_updates`
361 /// are deleted.
362 /// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
363 /// will complete. However, stale updates are not a problem for data integrity, since only updates
364 /// with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
365 ///
366 /// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
367 /// would like to get rid of them, consider using the
368 /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
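///
/// # Example
///
/// A construction and startup-read sketch. The `kv_store`, `logger`, `keys_manager`,
/// `broadcaster`, and `fee_estimator` values are assumed to be the user's own components and
/// are not defined here; `100` is an arbitrary illustrative consolidation threshold.
///
/// ```ignore
/// let persister = MonitorUpdatingPersister::new(
///     &kv_store, &logger, 100, &keys_manager, &keys_manager,
/// );
/// // On startup, load every monitor with its pending updates applied.
/// let monitors = persister
///     .read_all_channel_monitors_with_updates(&broadcaster, &fee_estimator)?;
/// ```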
369 pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
370 where
371         K::Target: KVStore,
372         L::Target: Logger,
373         ES::Target: EntropySource + Sized,
374         SP::Target: SignerProvider + Sized,
375 {
376         kv_store: K,
377         logger: L,
378         maximum_pending_updates: u64,
379         entropy_source: ES,
380         signer_provider: SP,
381 }
382
383 #[allow(dead_code)]
384 impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
385         MonitorUpdatingPersister<K, L, ES, SP>
386 where
387         K::Target: KVStore,
388         L::Target: Logger,
389         ES::Target: EntropySource + Sized,
390         SP::Target: SignerProvider + Sized,
391 {
392         /// Constructs a new [`MonitorUpdatingPersister`].
393         ///
394         /// The `maximum_pending_updates` parameter controls how many updates may be stored before a
395         /// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
396         /// consolidation will frequently occur with fewer updates than what you set here; this number
397         /// is merely the maximum that may be stored. When setting this value, consider that for higher
398         /// values of `maximum_pending_updates`:
399         ///
400         ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
401         /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
402         /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
403         ///   - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
404         /// "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
405         /// less frequent "waves."
406         ///   - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
407         /// [`MonitorUpdatingPersister::cleanup_stale_updates`].
408         pub fn new(
409                 kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
410                 signer_provider: SP,
411         ) -> Self {
412                 MonitorUpdatingPersister {
413                         kv_store,
414                         logger,
415                         maximum_pending_updates,
416                         entropy_source,
417                         signer_provider,
418                 }
419         }
420
421         /// Reads all stored channel monitors, along with any stored updates for them.
422         ///
423         /// It is extremely important that your [`KVStore::read`] implementation uses the
424         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
425         /// documentation for [`MonitorUpdatingPersister`].
426         pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
427                 &self, broadcaster: &B, fee_estimator: &F,
428         ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
429         where
430                 B::Target: BroadcasterInterface,
431                 F::Target: FeeEstimator,
432         {
433                 let monitor_list = self.kv_store.list(
434                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
435                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
436                 )?;
437                 let mut res = Vec::with_capacity(monitor_list.len());
438                 for monitor_key in monitor_list {
439                         res.push(self.read_channel_monitor_with_updates(
440                                 broadcaster,
441                                 fee_estimator,
442                                 monitor_key,
443                         )?)
444                 }
445                 Ok(res)
446         }
447
448         /// Read a single channel monitor, along with any stored updates for it.
449         ///
450         /// It is extremely important that your [`KVStore::read`] implementation uses the
451         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
452         /// documentation for [`MonitorUpdatingPersister`].
453         ///
454         /// For `monitor_key`, channel storage keys are the channel's transaction ID and index, or
455         /// [`OutPoint`], with an underscore `_` between them. For example, given:
456         ///
457         ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
458         ///   - Index: `1`
459         ///
460         /// The correct `monitor_key` would be:
461         /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
462         ///
463         /// Loading a large number of monitors will be faster if done in parallel. You can use this
464         /// function to accomplish this. Take care to limit the number of parallel readers.
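        ///
        /// A single-channel lookup sketch (the `persister`, `broadcaster`, `fee_estimator`, and
        /// `funding_txo` values are assumed to exist and are not defined here):
        ///
        /// ```ignore
        /// let monitor_key = format!("{}_{}", funding_txo.txid, funding_txo.index);
        /// let (block_hash, monitor) = persister
        ///     .read_channel_monitor_with_updates(&broadcaster, &fee_estimator, monitor_key)?;
        /// ```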
465         pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
466                 &self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
467         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
468         where
469                 B::Target: BroadcasterInterface,
470                 F::Target: FeeEstimator,
471         {
472                 let monitor_name = MonitorName::new(monitor_key)?;
473                 let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
474                 let mut current_update_id = monitor.get_latest_update_id();
475                 loop {
476                         current_update_id = match current_update_id.checked_add(1) {
477                                 Some(next_update_id) => next_update_id,
478                                 None => break,
479                         };
480                         let update_name = UpdateName::from(current_update_id);
481                         let update = match self.read_monitor_update(&monitor_name, &update_name) {
482                                 Ok(update) => update,
483                                 Err(err) if err.kind() == io::ErrorKind::NotFound => {
484                                         // We can't find any more updates, so we are done.
485                                         break;
486                                 }
487                                 Err(err) => return Err(err),
488                         };
489
490                         monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
491                                 .map_err(|e| {
492                                         log_error!(
493                                                 self.logger,
494                                                 "Monitor update failed. monitor: {} update: {} reason: {:?}",
495                                                 monitor_name.as_str(),
496                                                 update_name.as_str(),
497                                                 e
498                                         );
499                                         io::Error::new(io::ErrorKind::Other, "Monitor update failed")
500                                 })?;
501                 }
502                 Ok((block_hash, monitor))
503         }
504
505         /// Read a channel monitor.
506         fn read_monitor(
507                 &self, monitor_name: &MonitorName,
508         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
509                 let outpoint: OutPoint = monitor_name.try_into()?;
510                 let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
511                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
512                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
513                         monitor_name.as_str(),
514                 )?);
515                 // Discard the sentinel bytes if found.
516                 if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
517                         monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
518                 }
519                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
520                         &mut monitor_cursor,
521                         (&*self.entropy_source, &*self.signer_provider),
522                 ) {
523                         Ok((blockhash, channel_monitor)) => {
524                                 if channel_monitor.get_funding_txo().0.txid != outpoint.txid
525                                         || channel_monitor.get_funding_txo().0.index != outpoint.index
526                                 {
527                                         log_error!(
528                                                 self.logger,
529                                                 "ChannelMonitor {} was stored under the wrong key!",
530                                                 monitor_name.as_str()
531                                         );
532                                         Err(io::Error::new(
533                                                 io::ErrorKind::InvalidData,
534                                                 "ChannelMonitor was stored under the wrong key",
535                                         ))
536                                 } else {
537                                         Ok((blockhash, channel_monitor))
538                                 }
539                         }
540                         Err(e) => {
541                                 log_error!(
542                                         self.logger,
543                                         "Failed to read ChannelMonitor {}, reason: {}",
544                                         monitor_name.as_str(),
545                                         e,
546                                 );
547                                 Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
548                         }
549                 }
550         }
551
552         /// Read a channel monitor update.
553         fn read_monitor_update(
554                 &self, monitor_name: &MonitorName, update_name: &UpdateName,
555         ) -> Result<ChannelMonitorUpdate, io::Error> {
556                 let update_bytes = self.kv_store.read(
557                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
558                         monitor_name.as_str(),
559                         update_name.as_str(),
560                 )?;
561                 ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
562                         log_error!(
563                                 self.logger,
564                                 "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
565                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
566                                 monitor_name.as_str(),
567                                 update_name.as_str(),
568                                 e,
569                         );
570                         io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
571                 })
572         }
573
574         /// Cleans up stale updates for all monitors.
575         ///
576         /// This function works by first listing all monitors, and then for each of them, listing all
577         /// updates. The updates that have an `update_id` less than or equal to that of the stored monitor
578         /// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
579         /// be passed to [`KVStore::remove`].
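        ///
        /// For example, a one-off, non-lazy cleanup pass could be run after loading monitors at
        /// startup (a sketch only; `persister` is assumed to be a [`MonitorUpdatingPersister`]):
        ///
        /// ```ignore
        /// // Delete stale updates eagerly so they are gone before normal operation resumes.
        /// persister.cleanup_stale_updates(false)?;
        /// ```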
580         pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
581                 let monitor_keys = self.kv_store.list(
582                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
583                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
584                 )?;
585                 for monitor_key in monitor_keys {
586                         let monitor_name = MonitorName::new(monitor_key)?;
587                         let (_, current_monitor) = self.read_monitor(&monitor_name)?;
588                         let updates = self
589                                 .kv_store
590                                 .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?;
591                         for update in updates {
592                                 let update_name = UpdateName::new(update)?;
593                                 // if the update_id is lower than the stored monitor, delete
594                                 if update_name.0 <= current_monitor.get_latest_update_id() {
595                                         self.kv_store.remove(
596                                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
597                                                 monitor_name.as_str(),
598                                                 update_name.as_str(),
599                                                 lazy,
600                                         )?;
601                                 }
602                         }
603                 }
604                 Ok(())
605         }
606 }
607
608 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
609         Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
610 where
611         K::Target: KVStore,
612         L::Target: Logger,
613         ES::Target: EntropySource + Sized,
614         SP::Target: SignerProvider + Sized,
615 {
616         /// Persists a new channel. This means writing the entire monitor to the
617         /// parameterized [`KVStore`].
618         fn persist_new_channel(
619                 &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
620                 _monitor_update_call_id: MonitorUpdateId,
621         ) -> chain::ChannelMonitorUpdateStatus {
622                 // Determine the proper key for this monitor
623                 let monitor_name = MonitorName::from(funding_txo);
624                 // Serialize and write the new monitor
625                 let mut monitor_bytes = Vec::with_capacity(
626                         MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
627                 );
628                 monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
629                 monitor.write(&mut monitor_bytes).unwrap();
630                 match self.kv_store.write(
631                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
632                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
633                         monitor_name.as_str(),
634                         &monitor_bytes,
635                 ) {
636                         Ok(_) => {
637                                 chain::ChannelMonitorUpdateStatus::Completed
638                         }
639                         Err(e) => {
640                                 log_error!(
641                                         self.logger,
642                                         "Failed to write ChannelMonitor {}/{}/{} reason: {}",
643                                         CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
644                                         CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
645                                         monitor_name.as_str(),
646                                         e
647                                 );
648                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
649                         }
650                 }
651         }
652
653         /// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
654         ///
655         /// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
656         ///
657         ///   - No full monitor is found in [`KVStore`]
658         ///   - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
659         ///   - LDK commands re-persisting the entire monitor through this function, specifically when
660         ///     `update` is `None`.
661         ///   - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
662         fn update_persisted_channel(
663                 &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
664                 monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
665         ) -> chain::ChannelMonitorUpdateStatus {
666                 // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
667                 // ChannelMonitorUpdate's update_id.
668                 if let Some(update) = update {
669                         if update.update_id != CLOSED_CHANNEL_UPDATE_ID
670                                 && update.update_id % self.maximum_pending_updates != 0
671                         {
672                                 let monitor_name = MonitorName::from(funding_txo);
673                                 let update_name = UpdateName::from(update.update_id);
674                                 match self.kv_store.write(
675                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
676                                         monitor_name.as_str(),
677                                         update_name.as_str(),
678                                         &update.encode(),
679                                 ) {
680                                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
681                                         Err(e) => {
682                                                 log_error!(
683                                                         self.logger,
684                                                         "Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
685                                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
686                                                         monitor_name.as_str(),
687                                                         update_name.as_str(),
688                                                         e
689                                                 );
690                                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
691                                         }
692                                 }
693                         } else {
694                                 let monitor_name = MonitorName::from(funding_txo);
695                                 // In the case of a channel-close monitor update, we need to read the old monitor before
696                                 // persisting the new one in order to determine the cleanup range.
697                                 let maybe_old_monitor = match monitor.get_latest_update_id() {
698                                         CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
699                                         _ => None
700                                 };
701
702                                 // We could write this update, but it meets criteria of our design that calls for a full monitor write.
703                                 let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
704
705                                 if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
706                                         let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
707                                                 // If there is an error while reading the old monitor, we skip cleanup.
708                                                 maybe_old_monitor.map(|(_, ref old_monitor)| {
709                                                         let start = old_monitor.get_latest_update_id();
710                                                         // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
711                                                         let end = cmp::min(
712                                                                 start.saturating_add(self.maximum_pending_updates),
713                                                                 CLOSED_CHANNEL_UPDATE_ID - 1,
714                                                         );
715                                                         (start, end)
716                                                 })
717                                         } else {
718                                                 let end = monitor.get_latest_update_id();
719                                                 let start = end.saturating_sub(self.maximum_pending_updates);
720                                                 Some((start, end))
721                                         };
722
723                                         if let Some((start, end)) = cleanup_range {
724                                                 self.cleanup_in_range(monitor_name, start, end);
725                                         }
726                                 }
727
728                                 monitor_update_status
729                         }
730                 } else {
731                         // There is no update given, so we must persist a new monitor.
732                         self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
733                 }
734         }
735 }
736
737 impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
738 where
739         ES::Target: EntropySource + Sized,
740         K::Target: KVStore,
741         L::Target: Logger,
742         SP::Target: SignerProvider + Sized
743 {
744         // Cleans up monitor updates for the given monitor in range `start..=end`.
745         fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
746                 for update_id in start..=end {
747                         let update_name = UpdateName::from(update_id);
748                         if let Err(e) = self.kv_store.remove(
749                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
750                                 monitor_name.as_str(),
751                                 update_name.as_str(),
752                                 true,
753                         ) {
754                                 log_error!(
755                                         self.logger,
756                                         "Failed to clean up channel monitor updates for monitor {}, reason: {}",
757                                         monitor_name.as_str(),
758                                         e
759                                 );
760                         };
761                 }
762         }
763 }
764
765 /// A struct representing a name for a monitor.
766 #[derive(Debug)]
767 struct MonitorName(String);
768
769 impl MonitorName {
770         /// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
771         /// be formed from the given `name`.
772         pub fn new(name: String) -> Result<Self, io::Error> {
773                 MonitorName::do_try_into_outpoint(&name)?;
774                 Ok(Self(name))
775         }
776         /// Convert this monitor name to a str.
777         pub fn as_str(&self) -> &str {
778                 &self.0
779         }
780         /// Attempt to form a valid [`OutPoint`] from a given name string.
781         fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
782                 let mut parts = name.splitn(2, '_');
783                 let txid = if let Some(part) = parts.next() {
784                         Txid::from_str(part).map_err(|_| {
785                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
786                         })?
787                 } else {
788                         return Err(io::Error::new(
789                                 io::ErrorKind::InvalidData,
790                                 "Stored monitor key is not a splittable string",
791                         ));
792                 };
793                 let index = if let Some(part) = parts.next() {
794                         part.parse().map_err(|_| {
795                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
796                         })?
797                 } else {
798                         return Err(io::Error::new(
799                                 io::ErrorKind::InvalidData,
800                                 "No tx index value found after underscore in stored key",
801                         ));
802                 };
803                 Ok(OutPoint { txid, index })
804         }
805 }
806
807 impl TryFrom<&MonitorName> for OutPoint {
808         type Error = io::Error;
809
810         fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
811                 MonitorName::do_try_into_outpoint(&value.0)
812         }
813 }
814
815 impl From<OutPoint> for MonitorName {
816         fn from(value: OutPoint) -> Self {
817                 MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
818         }
819 }
820
821 /// A struct representing a name for an update.
822 #[derive(Debug)]
823 struct UpdateName(u64, String);
824
825 impl UpdateName {
826         /// Constructs an [`UpdateName`], after verifying that an update sequence ID
827         /// can be derived from the given `name`.
828         pub fn new(name: String) -> Result<Self, io::Error> {
829                 match name.parse::<u64>() {
830                         Ok(u) => Ok(u.into()),
831                         Err(_) => {
832                                 Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
833                         }
834                 }
835         }
836
837         /// Convert this monitor update name to a &str
838         pub fn as_str(&self) -> &str {
839                 &self.1
840         }
841 }
842
843 impl From<u64> for UpdateName {
844         fn from(value: u64) -> Self {
845                 Self(value, value.to_string())
846         }
847 }
848
849 #[cfg(test)]
850 mod tests {
851         use super::*;
852         use crate::chain::ChannelMonitorUpdateStatus;
853         use crate::events::{ClosureReason, MessageSendEventsProvider};
854         use crate::ln::functional_test_utils::*;
855         use crate::util::test_utils::{self, TestLogger, TestStore};
856         use crate::{check_added_monitors, check_closed_broadcast};
857         use crate::sync::Arc;
858         use crate::util::test_channel_signer::TestChannelSigner;
859
860         const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
861
862         #[test]
863         fn converts_u64_to_update_name() {
864                 assert_eq!(UpdateName::from(0).as_str(), "0");
865                 assert_eq!(UpdateName::from(21).as_str(), "21");
866                 assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
867         }
868
869         #[test]
870         fn bad_update_name_fails() {
871                 assert!(UpdateName::new("deadbeef".to_string()).is_err());
872                 assert!(UpdateName::new("-1".to_string()).is_err());
873         }
874
875         #[test]
876         fn monitor_from_outpoint_works() {
877                 let monitor_name1 = MonitorName::from(OutPoint {
878                         txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
879                         index: 1,
880                 });
881                 assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
882
883                 let monitor_name2 = MonitorName::from(OutPoint {
884                         txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
885                         index: u16::MAX,
886                 });
887                 assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
888         }
889
890         #[test]
891         fn bad_monitor_string_fails() {
892                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
893                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
894                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
895         }
896
897         // Exercise the `MonitorUpdatingPersister` with real channels and payments.
898         #[test]
899         fn persister_with_real_monitors() {
900                 // This value is used later to limit how many iterations we perform.
901                 let persister_0_max_pending_updates = 7;
902                 // Intentionally set this to a smaller value to test a different alignment.
903                 let persister_1_max_pending_updates = 3;
904                 let chanmon_cfgs = create_chanmon_cfgs(4);
905                 let persister_0 = MonitorUpdatingPersister {
906                         kv_store: &TestStore::new(false),
907                         logger: &TestLogger::new(),
908                         maximum_pending_updates: persister_0_max_pending_updates,
909                         entropy_source: &chanmon_cfgs[0].keys_manager,
910                         signer_provider: &chanmon_cfgs[0].keys_manager,
911                 };
912                 let persister_1 = MonitorUpdatingPersister {
913                         kv_store: &TestStore::new(false),
914                         logger: &TestLogger::new(),
915                         maximum_pending_updates: persister_1_max_pending_updates,
916                         entropy_source: &chanmon_cfgs[1].keys_manager,
917                         signer_provider: &chanmon_cfgs[1].keys_manager,
918                 };
919                 let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
920                 let chain_mon_0 = test_utils::TestChainMonitor::new(
921                         Some(&chanmon_cfgs[0].chain_source),
922                         &chanmon_cfgs[0].tx_broadcaster,
923                         &chanmon_cfgs[0].logger,
924                         &chanmon_cfgs[0].fee_estimator,
925                         &persister_0,
926                         &chanmon_cfgs[0].keys_manager,
927                 );
928                 let chain_mon_1 = test_utils::TestChainMonitor::new(
929                         Some(&chanmon_cfgs[1].chain_source),
930                         &chanmon_cfgs[1].tx_broadcaster,
931                         &chanmon_cfgs[1].logger,
932                         &chanmon_cfgs[1].fee_estimator,
933                         &persister_1,
934                         &chanmon_cfgs[1].keys_manager,
935                 );
936                 node_cfgs[0].chain_monitor = chain_mon_0;
937                 node_cfgs[1].chain_monitor = chain_mon_1;
938                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
939                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
940                 let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
941                 let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;
942
943                 // Check that the persisted channel data is empty before any channels are
944                 // open.
945                 let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
946                         &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
947                 assert_eq!(persisted_chan_data_0.len(), 0);
948                 let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
949                         &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
950                 assert_eq!(persisted_chan_data_1.len(), 0);
951
952                 // Helper to make sure the channel is on the expected update ID.
953                 macro_rules! check_persisted_data {
954                         ($expected_update_id: expr) => {
955                                 persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
956                                         &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
957                                 // check that we stored only one monitor
958                                 assert_eq!(persisted_chan_data_0.len(), 1);
959                                 for (_, mon) in persisted_chan_data_0.iter() {
960                                         // check that when we read it, we got the right update id
961                                         assert_eq!(mon.get_latest_update_id(), $expected_update_id);
962
963                                         // if the CM is at consolidation threshold, ensure no updates are stored.
964                                         let monitor_name = MonitorName::from(mon.get_funding_txo().0);
965                                         if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
966                                                         || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
967                                                 assert_eq!(
968                                                         persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
969                                                                 monitor_name.as_str()).unwrap().len(),
970                                                         0,
971                                                         "updates stored when they shouldn't be in persister 0"
972                                                 );
973                                         }
974                                 }
975                                 persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
976                                         &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
977                                 assert_eq!(persisted_chan_data_1.len(), 1);
978                                 for (_, mon) in persisted_chan_data_1.iter() {
979                                         assert_eq!(mon.get_latest_update_id(), $expected_update_id);
980                                         let monitor_name = MonitorName::from(mon.get_funding_txo().0);
981                                         // if the monitor is at the consolidation threshold, ensure no pending updates are stored.
982                                         if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
983                                                         || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
984                                                 assert_eq!(
985                                                         persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
986                                                                 monitor_name.as_str()).unwrap().len(),
987                                                         0,
988                                                         "updates stored when they shouldn't be in persister 1"
989                                                 );
990                                         }
991                                 }
992                         };
993                 }
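                    // Note on the threshold check in the macro above: a full monitor is expected to be
                    // written (and the per-update records pruned) whenever the latest update_id is a
                    // multiple of `maximum_pending_updates`, or when the channel closes with
                    // `CLOSED_CHANNEL_UPDATE_ID`; in between, the not-yet-consolidated updates remain in
                    // the update namespace.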
994
995                 // Create an initial channel and check that it was persisted.
996                 let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
997                 check_persisted_data!(0);
998
999                 // Send a few payments and make sure the monitors are updated to the latest.
1000                 send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
1001                 check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
1002                 send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
1003                 check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);
1004
1005                 // Send a few more payments to exercise every alignment of the persisters'
1006                 // maximum pending update counts with the updates from payments sent and received.
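                    // (Hypothetical illustration: if `maximum_pending_updates` were 5 and each payment
                    // produced 2 monitor updates, the full-monitor write would sometimes land in the
                    // middle of a payment's updates (update_id 5) and sometimes exactly on its last
                    // update (update_id 10), which is the alignment coverage this loop is after.)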
1007                 let mut sender = 0;
1008                 for i in 3..=persister_0_max_pending_updates * 2 {
1009                         let receiver;
1010                         if sender == 0 {
1011                                 sender = 1;
1012                                 receiver = 0;
1013                         } else {
1014                                 sender = 0;
1015                                 receiver = 1;
1016                         }
1017                         send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
1018                         check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
1019                 }
1020
1021                 // Force close because cooperative close doesn't result in any persisted
1022                 // updates.
1023                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1024
1025                 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
1026                 check_closed_broadcast!(nodes[0], true);
1027                 check_added_monitors!(nodes[0], 1);
1028
1029                 let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
1030                 assert_eq!(node_txn.len(), 1);
1031
1032                 connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
1033
1034                 check_closed_broadcast!(nodes[1], true);
1035                 check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
1036                 check_added_monitors!(nodes[1], 1);
1037
1038                 // Make sure everything is persisted as expected after close.
1039                 check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
1040
1041                 // Make sure no stale updates remain now that a full monitor was written on close.
1042                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
1043                 let (_, monitor) = &persisted_chan_data[0];
1044                 let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
1045                 // The channel should have 0 updates, as it wrote a full monitor and consolidated.
1046                 assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
1047                 assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
1048         }
1049
1050         // Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to persist a
1051         // monitor or update with it results in the persister returning an UnrecoverableError status.
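            // (`UnrecoverableError` is the status a persister returns when it cannot safely continue;
            // callers are expected to treat it as fatal rather than retry.)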
1052         #[test]
1053         fn unrecoverable_error_on_write_failure() {
1054                 // Set up a dummy channel and force close. This will produce a monitor
1055                 // that we can then use to test persistence.
1056                 let chanmon_cfgs = create_chanmon_cfgs(2);
1057                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1058                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1059                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1060                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
1061                 nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
1062                 check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
1063                 {
1064                         let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
1065                         let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
1066                         let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
1067                         let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
1068                         let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
1069                         let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
1070
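                            // `TestStore::new(true)` is assumed to build a store that rejects writes (hence
                            // the `ro_` prefix below), so every persistence attempt should fail.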
1071                         let ro_persister = MonitorUpdatingPersister {
1072                                 kv_store: &TestStore::new(true),
1073                                 logger: &TestLogger::new(),
1074                                 maximum_pending_updates: 11,
1075                                 entropy_source: node_cfgs[0].keys_manager,
1076                                 signer_provider: node_cfgs[0].keys_manager,
1077                         };
1078                         match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
1079                                 ChannelMonitorUpdateStatus::UnrecoverableError => {
1080                                         // correct result
1081                                 }
1082                                 ChannelMonitorUpdateStatus::Completed => {
1083                                         panic!("Completed persisting new channel when shouldn't have")
1084                                 }
1085                                 ChannelMonitorUpdateStatus::InProgress => {
1086                                         panic!("Returned InProgress when shouldn't have")
1087                                 }
1088                         }
1089                         match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
1090                                 ChannelMonitorUpdateStatus::UnrecoverableError => {
1091                                         // correct result
1092                                 }
1093                                 ChannelMonitorUpdateStatus::Completed => {
1094                                         panic!("Completed updating persisted channel when shouldn't have")
1095                                 }
1096                                 ChannelMonitorUpdateStatus::InProgress => {
1097                                         panic!("Returned InProgress when shouldn't have")
1098                                 }
1099                         }
1100                         added_monitors.clear();
1101                 }
1102                 nodes[1].node.get_and_clear_pending_msg_events();
1103         }
1104
1105         // Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
1106         #[test]
1107         fn clean_stale_updates_works() {
1108                 let test_max_pending_updates = 7;
1109                 let chanmon_cfgs = create_chanmon_cfgs(3);
1110                 let persister_0 = MonitorUpdatingPersister {
1111                         kv_store: &TestStore::new(false),
1112                         logger: &TestLogger::new(),
1113                         maximum_pending_updates: test_max_pending_updates,
1114                         entropy_source: &chanmon_cfgs[0].keys_manager,
1115                         signer_provider: &chanmon_cfgs[0].keys_manager,
1116                 };
1117                 let persister_1 = MonitorUpdatingPersister {
1118                         kv_store: &TestStore::new(false),
1119                         logger: &TestLogger::new(),
1120                         maximum_pending_updates: test_max_pending_updates,
1121                         entropy_source: &chanmon_cfgs[1].keys_manager,
1122                         signer_provider: &chanmon_cfgs[1].keys_manager,
1123                 };
1124                 let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1125                 let chain_mon_0 = test_utils::TestChainMonitor::new(
1126                         Some(&chanmon_cfgs[0].chain_source),
1127                         &chanmon_cfgs[0].tx_broadcaster,
1128                         &chanmon_cfgs[0].logger,
1129                         &chanmon_cfgs[0].fee_estimator,
1130                         &persister_0,
1131                         &chanmon_cfgs[0].keys_manager,
1132                 );
1133                 let chain_mon_1 = test_utils::TestChainMonitor::new(
1134                         Some(&chanmon_cfgs[1].chain_source),
1135                         &chanmon_cfgs[1].tx_broadcaster,
1136                         &chanmon_cfgs[1].logger,
1137                         &chanmon_cfgs[1].fee_estimator,
1138                         &persister_1,
1139                         &chanmon_cfgs[1].keys_manager,
1140                 );
1141                 node_cfgs[0].chain_monitor = chain_mon_0;
1142                 node_cfgs[1].chain_monitor = chain_mon_1;
1143                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1144                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1145
1146                 let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
1147
1148                 // Check that the persisted channel data is empty before any channels are
1149                 // open.
1150                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
1151                 assert_eq!(persisted_chan_data.len(), 0);
1152
1153                 // Create an initial channel
1154                 let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
1155
1156                 // Send a few payments to advance the updates a bit
1157                 send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
1158                 send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
1159
1160                 // Get the monitor and write a fake stale update at update_id=1 (the lowest possible update_id)
1161                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
1162                 let (_, monitor) = &persisted_chan_data[0];
1163                 let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
1164                 persister_0
1165                         .kv_store
1166                         .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
1167                         .unwrap();
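                    // The payload written above is deliberately meaningless: the cleanup exercised below
                    // only looks at the update_id encoded in the key name, never at the stored bytes.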
1168
1169                 // Do the stale update cleanup
1170                 persister_0.cleanup_stale_updates(false).unwrap();
1171
1172                 // Confirm the stale update is unreadable/gone
1173                 assert!(persister_0
1174                         .kv_store
1175                         .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
1176                         .is_err());
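                    // Extra sanity check (a sketch): the monitor itself should still be readable after
                    // cleanup, since only the stale per-update key was removed.
                    let persisted_chan_data_after_cleanup = persister_0
                            .read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator)
                            .unwrap();
                    assert_eq!(persisted_chan_data_after_cleanup.len(), 1);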
1177
1178                 // Force close.
1179                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1180                 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
1181                 check_closed_broadcast!(nodes[0], true);
1182                 check_added_monitors!(nodes[0], 1);
1183
1184                 // Write a fake stale update at u64::MAX - 1, just below the closed-channel update ID
1185                 persister_0
1186                         .kv_store
1187                         .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
1188                         .unwrap();
1189
1190                 // Do the stale update cleanup
1191                 persister_0.cleanup_stale_updates(false).unwrap();
1192
1193                 // Confirm the stale update is unreadable/gone
1194                 assert!(persister_0
1195                         .kv_store
1196                         .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
1197                         .is_err());
1198         }
1199
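            // Compile-time helper: instantiating `persist_fn` proves that the given `Deref` target
            // implements `Persist`. The test below uses it to check that a `KVStore` trait object behind
            // an `Arc` can back a persister (this works because `Persist` is blanket-implemented for
            // `KVStore`s earlier in this file).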
1200         fn persist_fn<P: Deref, ChannelSigner: WriteableEcdsaChannelSigner>(_persist: P) -> bool where P::Target: Persist<ChannelSigner> {
1201                 true
1202         }
1203
1204         #[test]
1205         fn kvstore_trait_object_usage() {
1206                 let store: Arc<dyn KVStore + Send + Sync> = Arc::new(TestStore::new(false));
1207                 assert!(persist_fn::<_, TestChannelSigner>(store.clone()));
1208         }
1209 }