lightning/src/util/persist.rs
1 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
2 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
3 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
4 // You may not use this file except in accordance with one or both of these
5 // licenses.
6
7 //! This module contains a simple key-value store trait [`KVStore`] that
8 //! allows one to implement persistence for [`ChannelManager`], [`NetworkGraph`],
9 //! and [`ChannelMonitor`] all in one place.
10
11 use core::cmp;
12 use core::convert::{TryFrom, TryInto};
13 use core::ops::Deref;
14 use bitcoin::hashes::hex::{FromHex, ToHex};
15 use bitcoin::{BlockHash, Txid};
16
17 use crate::{io, log_error};
18 use crate::alloc::string::ToString;
19 use crate::prelude::*;
20
21 use crate::chain;
22 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
23 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
24 use crate::sign::{EntropySource, NodeSigner, WriteableEcdsaChannelSigner, SignerProvider};
25 use crate::chain::transaction::OutPoint;
26 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
27 use crate::ln::channelmanager::ChannelManager;
28 use crate::routing::router::Router;
29 use crate::routing::gossip::NetworkGraph;
30 use crate::routing::scoring::WriteableScore;
31 use crate::util::logger::Logger;
32 use crate::util::ser::{Readable, ReadableArgs, Writeable};
33
34 /// The alphabet of characters allowed for namespaces and keys.
35 pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
36
37 /// The maximum number of characters namespaces and keys may have.
38 pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
39
40 /// The namespace under which the [`ChannelManager`] will be persisted.
41 pub const CHANNEL_MANAGER_PERSISTENCE_NAMESPACE: &str = "";
42 /// The sub-namespace under which the [`ChannelManager`] will be persisted.
43 pub const CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE: &str = "";
44 /// The key under which the [`ChannelManager`] will be persisted.
45 pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
46
47 /// The namespace under which [`ChannelMonitor`]s will be persisted.
48 pub const CHANNEL_MONITOR_PERSISTENCE_NAMESPACE: &str = "monitors";
49 /// The sub-namespace under which [`ChannelMonitor`]s will be persisted.
50 pub const CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE: &str = "";
51 /// The namespace under which [`ChannelMonitorUpdate`]s will be persisted.
52 pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE: &str = "monitor_updates";
53
54 /// The namespace under which the [`NetworkGraph`] will be persisted.
55 pub const NETWORK_GRAPH_PERSISTENCE_NAMESPACE: &str = "";
56 /// The sub-namespace under which the [`NetworkGraph`] will be persisted.
57 pub const NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE: &str = "";
58 /// The key under which the [`NetworkGraph`] will be persisted.
59 pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
60
61 /// The namespace under which the [`WriteableScore`] will be persisted.
62 pub const SCORER_PERSISTENCE_NAMESPACE: &str = "";
63 /// The sub-namespace under which the [`WriteableScore`] will be persisted.
64 pub const SCORER_PERSISTENCE_SUB_NAMESPACE: &str = "";
65 /// The key under which the [`WriteableScore`] will be persisted.
66 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
67
68 /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
69 ///
70 /// This serves to prevent someone from accidentally loading such monitors (which may need
71 /// updates applied to be current) with another implementation.
72 pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
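
// A minimal sketch (not part of LDK, and `strip_monitor_sentinel` is a hypothetical helper name)
// showing how the sentinel can be detected and stripped, e.g. when migrating monitors written by
// the `MonitorUpdatingPersister` back to the default `Persist` implementation after all pending
// updates have been applied and the consolidated monitor re-persisted.
#[allow(dead_code)]
fn strip_monitor_sentinel(stored: &[u8]) -> &[u8] {
	if stored.starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
		// Skip the sentinel bytes; the remainder is a plain serialized `ChannelMonitor`.
		&stored[MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len()..]
	} else {
		stored
	}
}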
73
74 /// Provides an interface that allows storage and retrieval of persisted values that are associated
75 /// with given keys.
76 ///
77 /// In order to avoid collisions the key space is segmented based on the given `namespace`s and
78 /// `sub_namespace`s. Implementations of this trait are free to handle them in different ways, as
79 /// long as per-namespace key uniqueness is asserted.
80 ///
81 /// Keys and namespaces are required to be valid ASCII strings in the range of
82 /// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
83 /// namespaces and sub-namespaces (`""`) are assumed to be valid; however, if `namespace` is
84 /// empty, `sub_namespace` is required to be empty, too. This means that concerns should always be
85 /// separated by namespace first, before sub-namespaces are used. While the number of namespaces
86 /// will be relatively small and is determined at compile time, there may be many sub-namespaces
87 /// per namespace. Note that per-namespace uniqueness needs to also hold for keys *and*
88 /// namespaces/sub-namespaces in any given namespace/sub-namespace, i.e., conflicts between keys
89 /// and equally named namespaces/sub-namespaces must be avoided.
90 ///
91 /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
92 /// interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the
93 /// data model previously assumed by `KVStorePersister::persist`.
94 pub trait KVStore {
95         /// Returns the data stored for the given `namespace`, `sub_namespace`, and `key`.
96         ///
97         /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
98         /// `namespace` and `sub_namespace`.
99         ///
100         /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
101         fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
102         /// Persists the given data under the given `key`.
103         ///
104         /// Will create the given `namespace` and `sub_namespace` if not already present in the store.
105         fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
106         /// Removes any data that had previously been persisted under the given `key`.
107         ///
108         /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
109         /// remove the given `key` at some point in time after the method returns, e.g., as part of an
110         /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
111         /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
112         ///
113         /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
114         /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
115         /// potentially get lost on crash after the method returns. Therefore, this flag should only be
116         /// set for `remove` operations that can be safely replayed at a later time.
117         ///
118         /// Returns successfully if no data will be stored for the given `namespace`, `sub_namespace`, and
119         /// `key`, independently of whether it was present before its invocation or not.
120         fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
121         /// Returns a list of keys that are stored under the given `sub_namespace` in `namespace`.
122         ///
123         /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
124         /// returned keys. Returns an empty list if `namespace` or `sub_namespace` is unknown.
125         fn list(&self, namespace: &str, sub_namespace: &str) -> Result<Vec<String>, io::Error>;
126 }
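
// A minimal sketch of an in-memory `KVStore` implementation, for illustration only: it is not
// part of LDK, assumes the crate's `std` feature (for `std::sync::Mutex`), and the module, type,
// and field names are hypothetical. Entries are keyed by `(namespace, sub_namespace)` and the
// `lazy` hint on `remove` is ignored, which the trait permits.
#[cfg(all(test, feature = "std"))]
mod in_memory_store_example {
	#![allow(unused)]
	use super::KVStore;
	use crate::io;
	use crate::prelude::*;
	use std::sync::Mutex;

	struct InMemoryStore {
		entries: Mutex<HashMap<(String, String), HashMap<String, Vec<u8>>>>,
	}

	impl KVStore for InMemoryStore {
		fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error> {
			let entries = self.entries.lock().unwrap();
			entries.get(&(namespace.to_string(), sub_namespace.to_string()))
				.and_then(|map| map.get(key).cloned())
				// Returning `NotFound` when (and only when) the key is missing is important,
				// e.g. for the `MonitorUpdatingPersister` further below.
				.ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
		}
		fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error> {
			let mut entries = self.entries.lock().unwrap();
			entries.entry((namespace.to_string(), sub_namespace.to_string()))
				.or_insert_with(HashMap::new)
				.insert(key.to_string(), buf.to_vec());
			Ok(())
		}
		fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> Result<(), io::Error> {
			let mut entries = self.entries.lock().unwrap();
			if let Some(map) = entries.get_mut(&(namespace.to_string(), sub_namespace.to_string())) {
				map.remove(key);
			}
			// Success is returned whether or not the key was present.
			Ok(())
		}
		fn list(&self, namespace: &str, sub_namespace: &str) -> Result<Vec<String>, io::Error> {
			let entries = self.entries.lock().unwrap();
			Ok(entries.get(&(namespace.to_string(), sub_namespace.to_string()))
				.map(|map| map.keys().cloned().collect())
				.unwrap_or_else(Vec::new))
		}
	}
}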
127
128 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
129 pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
130         where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
131                 T::Target: 'static + BroadcasterInterface,
132                 ES::Target: 'static + EntropySource,
133                 NS::Target: 'static + NodeSigner,
134                 SP::Target: 'static + SignerProvider,
135                 F::Target: 'static + FeeEstimator,
136                 R::Target: 'static + Router,
137                 L::Target: 'static + Logger,
138 {
139         /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
140         fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error>;
141
142         /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
143         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
144
145         /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
146         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
147 }
148
149
150 impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
151         where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
152                 T::Target: 'static + BroadcasterInterface,
153                 ES::Target: 'static + EntropySource,
154                 NS::Target: 'static + NodeSigner,
155                 SP::Target: 'static + SignerProvider,
156                 F::Target: 'static + FeeEstimator,
157                 R::Target: 'static + Router,
158                 L::Target: 'static + Logger,
159 {
160         /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
161         fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
162                 self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
163                                    CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
164                                    CHANNEL_MANAGER_PERSISTENCE_KEY,
165                                    &channel_manager.encode())
166         }
167
168         /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
169         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
170                 self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
171                                    NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
172                                    NETWORK_GRAPH_PERSISTENCE_KEY,
173                                    &network_graph.encode())
174         }
175
176         /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
177         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
178                 self.write(SCORER_PERSISTENCE_NAMESPACE,
179                                    SCORER_PERSISTENCE_SUB_NAMESPACE,
180                                    SCORER_PERSISTENCE_KEY,
181                                    &scorer.encode())
182         }
183 }
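
// Illustrative only (not part of LDK): because of the blanket impl above, any `KVStore` can be
// handed directly to code expecting a `Persister`. The hypothetical helper below simply persists
// all three objects through one store, e.g. as part of a node's shutdown path.
#[allow(dead_code)]
fn persist_all<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>(
	store: &A, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>,
	network_graph: &NetworkGraph<L>, scorer: &S,
) -> Result<(), io::Error>
	where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
{
	// Each call writes under the `*_PERSISTENCE_NAMESPACE`/`*_PERSISTENCE_KEY` constants defined
	// near the top of this module.
	store.persist_manager(channel_manager)?;
	store.persist_graph(network_graph)?;
	store.persist_scorer(scorer)
}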
184
185 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
186         // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
187         // down once these start returning failure.
188         // Then we should return InProgress rather than UnrecoverableError, implying we should probably
189         // just shut down the node since we're not retrying persistence!
190
191         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
192                 let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
193                 match self.write(
194                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
195                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
196                         &key, &monitor.encode())
197                 {
198                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
199                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
200                 }
201         }
202
203         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
204                 let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
205                 match self.write(
206                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
207                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
208                         &key, &monitor.encode())
209                 {
210                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
211                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
212                 }
213         }
214 }
215
216 /// Read previously persisted [`ChannelMonitor`]s from the store.
217 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
218         kv_store: K, entropy_source: ES, signer_provider: SP,
219 ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
220 where
221         K::Target: KVStore,
222         ES::Target: EntropySource + Sized,
223         SP::Target: SignerProvider + Sized,
224 {
225         let mut res = Vec::new();
226
227         for stored_key in kv_store.list(
228                 CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
229         {
230                 if stored_key.len() < 66 {
231                         return Err(io::Error::new(
232                                 io::ErrorKind::InvalidData,
233                                 "Stored key has invalid length"));
234                 }
235
236                 let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
237                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
238                 })?;
239
240                 let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
241                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
242                 })?;
243
244                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
245                         &mut io::Cursor::new(
246                                 kv_store.read(CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE, &stored_key)?),
247                         (&*entropy_source, &*signer_provider),
248                 ) {
249                         Ok((block_hash, channel_monitor)) => {
250                                 if channel_monitor.get_funding_txo().0.txid != txid
251                                         || channel_monitor.get_funding_txo().0.index != index
252                                 {
253                                         return Err(io::Error::new(
254                                                 io::ErrorKind::InvalidData,
255                                                 "ChannelMonitor was stored under the wrong key",
256                                         ));
257                                 }
258                                 res.push((block_hash, channel_monitor));
259                         }
260                         Err(_) => {
261                                 return Err(io::Error::new(
262                                         io::ErrorKind::InvalidData,
263                                         "Failed to read ChannelMonitor"
264                                 ))
265                         }
266                 }
267         }
268         Ok(res)
269 }
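
// Hypothetical startup usage (illustrative, not from LDK's examples): `store` is any `KVStore`
// and `keys_manager` is a signer such as LDK's `KeysManager`, which implements both
// `EntropySource` and `SignerProvider` and can therefore be passed for both arguments:
//
// let channel_monitors = read_channel_monitors(&store, &keys_manager, &keys_manager)?;
// // Each `(BlockHash, ChannelMonitor)` pair is then handed back to the chain monitor /
// // `ChannelManager` during startup, before the node resumes operation.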
270
271 /// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
272 /// [`ChannelMonitorUpdate`]s.
273 ///
274 /// # Overview
275 ///
276 /// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
277 /// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
278 /// deleting) and complexity. This is because it writes channel monitor differential updates,
279 /// whereas the other (default) implementation rewrites the entire monitor on each update. For
280 /// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
281 /// of megabytes (or more). Updates can be as small as a few hundred bytes.
282 ///
283 /// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
284 /// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
285 /// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
286 /// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
287 /// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
288 /// sentinel bytes.
289 ///
290 /// # Storing monitors
291 ///
292 /// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
293 ///
294 ///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
295 ///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
296 ///
297 /// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_NAMESPACE`], using the
298 /// familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
299 ///
300 /// Each [`ChannelMonitorUpdate`] is stored in a dynamic sub-namespace, as follows:
301 ///
302 ///   - namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE`]
303 ///   - sub-namespace: [the monitor's encoded outpoint name]
304 ///
305 /// Under that sub-namespace, each update is stored with a number string, like `21`, which
306 /// represents its `update_id` value.
307 ///
308 /// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
309 ///
310 ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
311 ///   - Index: `1`
312 ///
313 /// Full channel monitors would be stored at a single key:
314 ///
315 /// `[CHANNEL_MONITOR_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
316 ///
317 /// Updates would be stored as follows (with `/` delimiting namespace/sub-namespace/key):
318 ///
319 /// ```text
320 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
321 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
322 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
323 /// ```
324 /// ... and so on.
325 ///
326 /// # Reading channel state from storage
327 ///
328 /// Channel state can be reconstructed by calling
329 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
330 /// list channel monitors themselves and load channels individually using
331 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
332 /// 
333 /// ## EXTREMELY IMPORTANT
334 /// 
335 /// It is extremely important that your [`KVStore::read`] implementation uses the
336 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
337 /// that circumstance (not when there is really a permissions error, for example). This is because
338 /// neither channel monitor reading function lists updates. Instead, each reads the monitor and,
339 /// using its stored `update_id`, synthesizes update storage keys, and tries them in sequence until
340 /// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
341 ///
342 /// # Pruning stale channel updates
343 ///
344 /// Stale updates are pruned when a full monitor is written. The old monitor is first read, and if
345 /// that succeeds, updates in the range between the old and new monitors are deleted. The `lazy`
346 /// flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
347 /// will complete. However, stale updates are not a problem for data integrity, since only updates
348 /// with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
349 ///
350 /// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
351 /// would like to get rid of them, consider using the
352 /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
353 pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
354 where
355         K::Target: KVStore,
356         L::Target: Logger,
357         ES::Target: EntropySource + Sized,
358         SP::Target: SignerProvider + Sized,
359 {
360         kv_store: K,
361         logger: L,
362         maximum_pending_updates: u64,
363         entropy_source: ES,
364         signer_provider: SP,
365 }
366
367 #[allow(dead_code)]
368 impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
369         MonitorUpdatingPersister<K, L, ES, SP>
370 where
371         K::Target: KVStore,
372         L::Target: Logger,
373         ES::Target: EntropySource + Sized,
374         SP::Target: SignerProvider + Sized,
375 {
376         /// Constructs a new [`MonitorUpdatingPersister`].
377         ///
378         /// The `maximum_pending_updates` parameter controls how many updates may be stored before a
379         /// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
380         /// consolidation will frequently occur with fewer updates than what you set here; this number
381         /// is merely the maximum that may be stored. When setting this value, consider that for higher
382         /// values of `maximum_pending_updates`:
383         /// 
384         ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
385         /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
386         /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
387         ///   - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
388         /// "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
389         /// less frequent "waves."
390         ///   - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
391         /// [`MonitorUpdatingPersister::cleanup_stale_updates`].
392         pub fn new(
393                 kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
394                 signer_provider: SP,
395         ) -> Self
396         where
397                 ES::Target: EntropySource + Sized,
398                 SP::Target: SignerProvider + Sized,
399         {
400                 MonitorUpdatingPersister {
401                         kv_store,
402                         logger,
403                         maximum_pending_updates,
404                         entropy_source,
405                         signer_provider,
406                 }
407         }
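
	// Hypothetical construction (illustrative): `store`, `logger`, and `keys_manager` are assumed
	// to implement `KVStore`, `Logger`, and `EntropySource + SignerProvider` respectively. The
	// resulting persister can be handed to a `ChainMonitor` as its `Persist` implementation:
	//
	// let persister = MonitorUpdatingPersister::new(&store, &logger, 100, &keys_manager, &keys_manager);
	// let monitors = persister.read_all_channel_monitors_with_updates(&broadcaster, &fee_estimator)?;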
408
409         /// Reads all stored channel monitors, along with any stored updates for them.
410         ///
411         /// It is extremely important that your [`KVStore::read`] implementation uses the
412         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
413         /// documentation for [`MonitorUpdatingPersister`].
414         pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref + Clone>(
415                 &self, broadcaster: B, fee_estimator: F,
416         ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
417         where
418                 ES::Target: EntropySource + Sized,
419                 SP::Target: SignerProvider + Sized,
420                 B::Target: BroadcasterInterface,
421                 F::Target: FeeEstimator,
422         {
423                 let monitor_list = self.kv_store.list(
424                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
425                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
426                 )?;
427                 let mut res = Vec::with_capacity(monitor_list.len());
428                 for monitor_key in monitor_list {
429                         res.push(self.read_channel_monitor_with_updates(
430                                 &broadcaster,
431                                 fee_estimator.clone(),
432                                 monitor_key,
433                         )?)
434                 }
435                 Ok(res)
436         }
437
438         /// Read a single channel monitor, along with any stored updates for it.
439         ///
440         /// It is extremely important that your [`KVStore::read`] implementation uses the
441         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
442         /// documentation for [`MonitorUpdatingPersister`].
443         ///
444         /// For `monitor_key`, channel storage keys are the channel's transaction ID and index, or
445         /// [`OutPoint`], with an underscore `_` between them. For example, given:
446         ///
447         ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
448         ///   - Index: `1`
449         ///
450         /// The correct `monitor_key` would be:
451         /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
452         /// 
453         /// Loading a large number of monitors will be faster if done in parallel. You can use this
454         /// function to accomplish this. Take care to limit the number of parallel readers.
455         pub fn read_channel_monitor_with_updates<B: Deref, F: Deref + Clone>(
456                 &self, broadcaster: &B, fee_estimator: F, monitor_key: String,
457         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
458         where
459                 ES::Target: EntropySource + Sized,
460                 SP::Target: SignerProvider + Sized,
461                 B::Target: BroadcasterInterface,
462                 F::Target: FeeEstimator,
463         {
464                 let monitor_name = MonitorName::new(monitor_key)?;
465                 let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
466                 let mut current_update_id = monitor.get_latest_update_id();
467                 loop {
468                         current_update_id = match current_update_id.checked_add(1) {
469                                 Some(next_update_id) => next_update_id,
470                                 None => break,
471                         };
472                         let update_name = UpdateName::from(current_update_id);
473                         let update = match self.read_monitor_update(&monitor_name, &update_name) {
474                                 Ok(update) => update,
475                                 Err(err) if err.kind() == io::ErrorKind::NotFound => {
476                                         // We can't find any more updates, so we are done.
477                                         break;
478                                 }
479                                 Err(err) => return Err(err),
480                         };
481
482                         monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
483                                 .map_err(|e| {
484                                         log_error!(
485                                                 self.logger,
486                                                 "Monitor update failed. monitor: {} update: {} reason: {:?}",
487                                                 monitor_name.as_str(),
488                                                 update_name.as_str(),
489                                                 e
490                                         );
491                                         io::Error::new(io::ErrorKind::Other, "Monitor update failed")
492                                 })?;
493                 }
494                 Ok((block_hash, monitor))
495         }
496
497         /// Read a channel monitor.
498         fn read_monitor(
499                 &self, monitor_name: &MonitorName,
500         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error> {
501                 let outpoint: OutPoint = monitor_name.try_into()?;
502                 let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
503                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
504                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
505                         monitor_name.as_str(),
506                 )?);
507                 // Discard the sentinel bytes if found.
508                 if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
509                         monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
510                 }
511                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
512                         &mut monitor_cursor,
513                         (&*self.entropy_source, &*self.signer_provider),
514                 ) {
515                         Ok((blockhash, channel_monitor)) => {
516                                 if channel_monitor.get_funding_txo().0.txid != outpoint.txid
517                                         || channel_monitor.get_funding_txo().0.index != outpoint.index
518                                 {
519                                         log_error!(
520                                                 self.logger,
521                                                 "ChannelMonitor {} was stored under the wrong key!",
522                                                 monitor_name.as_str()
523                                         );
524                                         Err(io::Error::new(
525                                                 io::ErrorKind::InvalidData,
526                                                 "ChannelMonitor was stored under the wrong key",
527                                         ))
528                                 } else {
529                                         Ok((blockhash, channel_monitor))
530                                 }
531                         }
532                         Err(e) => {
533                                 log_error!(
534                                         self.logger,
535                                         "Failed to read ChannelMonitor {}, reason: {}",
536                                         monitor_name.as_str(),
537                                         e,
538                                 );
539                                 Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
540                         }
541                 }
542         }
543
544         /// Read a channel monitor update.
545         fn read_monitor_update(
546                 &self, monitor_name: &MonitorName, update_name: &UpdateName,
547         ) -> Result<ChannelMonitorUpdate, io::Error> {
548                 let update_bytes = self.kv_store.read(
549                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
550                         monitor_name.as_str(),
551                         update_name.as_str(),
552                 )?;
553                 ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
554                         log_error!(
555                                 self.logger,
556                                 "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
557                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
558                                 monitor_name.as_str(),
559                                 update_name.as_str(),
560                                 e,
561                         );
562                         io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
563                 })
564         }
565
566         /// Cleans up stale updates for all monitors.
567         ///
568         /// This function works by first listing all monitors, and then for each of them, listing all
569         /// updates. The updates that have an `update_id` less than or equal to that of the stored monitor
570         /// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
571         /// be passed to [`KVStore::remove`].
572         pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
573                 let monitor_keys = self.kv_store.list(
574                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
575                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
576                 )?;
577                 for monitor_key in monitor_keys {
578                         let monitor_name = MonitorName::new(monitor_key)?;
579                         let (_, current_monitor) = self.read_monitor(&monitor_name)?;
580                         let updates = self
581                                 .kv_store
582                                 .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str())?;
583                         for update in updates {
584                                 let update_name = UpdateName::new(update)?;
585                                 // if the update_id is lower than the stored monitor, delete
586                                 if update_name.0 <= current_monitor.get_latest_update_id() {
587                                         self.kv_store.remove(
588                                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
589                                                 monitor_name.as_str(),
590                                                 update_name.as_str(),
591                                                 lazy,
592                                         )?;
593                                 }
594                         }
595                 }
596                 Ok(())
597         }
598 }
599
600 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref> 
601         Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
602 where
603         K::Target: KVStore,
604         L::Target: Logger,
605         ES::Target: EntropySource + Sized,
606         SP::Target: SignerProvider + Sized,
607 {
608         /// Persists a new channel. This means writing the entire monitor to the
609         /// parametrized [`KVStore`].
610         fn persist_new_channel(
611                 &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
612                 _monitor_update_call_id: MonitorUpdateId,
613         ) -> chain::ChannelMonitorUpdateStatus {
614                 // Determine the proper key for this monitor
615                 let monitor_name = MonitorName::from(funding_txo);
616                 let maybe_old_monitor = self.read_monitor(&monitor_name);
617                 match maybe_old_monitor {
618                         Ok((_, ref old_monitor)) => {
619                                 // Check that this key isn't already storing a monitor with a higher update_id
620                                 // (collision)
621                                 if old_monitor.get_latest_update_id() > monitor.get_latest_update_id() {
622                                         log_error!(
623                                                 self.logger,
624                                                 "Tried to write a monitor at the same outpoint {} with a higher update_id!",
625                                                 monitor_name.as_str()
626                                         );
627                                         return chain::ChannelMonitorUpdateStatus::UnrecoverableError;
628                                 }
629                         }
630                         // This means the channel monitor is new.
631                         Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
632                         _ => return chain::ChannelMonitorUpdateStatus::UnrecoverableError,
633                 }
634                 // Serialize and write the new monitor
635                 let mut monitor_bytes = Vec::with_capacity(
636                         MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
637                 );
638                 monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
639                 monitor.write(&mut monitor_bytes).unwrap();
640                 match self.kv_store.write(
641                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
642                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
643                         monitor_name.as_str(),
644                         &monitor_bytes,
645                 ) {
646                         Ok(_) => {
647                                 // Assess cleanup. Typically, we'll clean up only between the last two known full
648                                 // monitors.
649                                 if let Ok((_, old_monitor)) = maybe_old_monitor {
650                                         let start = old_monitor.get_latest_update_id();
651                                         let end = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
652                                                 // We don't want to clean the rest of u64, so just do possible pending
653                                                 // updates. Note that we never write updates at
654                                                 // `CLOSED_CHANNEL_UPDATE_ID`.
655                                                 cmp::min(
656                                                         start.saturating_add(self.maximum_pending_updates),
657                                                         CLOSED_CHANNEL_UPDATE_ID - 1,
658                                                 )
659                                         } else {
660                                                 monitor.get_latest_update_id().saturating_sub(1)
661                                         };
662                                         // We should bother cleaning up only if there's at least one update
663                                         // expected.
664                                         for update_id in start..=end {
665                                                 let update_name = UpdateName::from(update_id);
666                                                 #[cfg(debug_assertions)]
667                                                 {
668                                                         if let Ok(update) =
669                                                                 self.read_monitor_update(&monitor_name, &update_name)
670                                                         {
671                                                                 // Assert that we are reading what we think we are.
672                                                                 debug_assert_eq!(update.update_id, update_name.0);
673                                                         } else if update_id != start && monitor.get_latest_update_id() != CLOSED_CHANNEL_UPDATE_ID
674                                                         {
675                                                                 // We're deleting something we should know doesn't exist.
676                                                                 panic!(
677                                                                         "failed to read monitor update {}",
678                                                                         update_name.as_str()
679                                                                 );
680                                                         }
681                                                         // On closed channels, we will unavoidably try to read
682                                                         // non-existent updates since we have to guess at the range of
683                                                         // stale updates, so do nothing.
684                                                 }
685                                                 if let Err(e) = self.kv_store.remove(
686                                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
687                                                         monitor_name.as_str(),
688                                                         update_name.as_str(),
689                                                         true,
690                                                 ) {
691                                                         log_error!(
692                                                                 self.logger,
693                                                                 "error cleaning up channel monitor updates for monitor {}, reason: {}",
694                                                                 monitor_name.as_str(),
695                                                                 e
696                                                         );
697                                                 };
698                                         }
699                                 };
700                                 chain::ChannelMonitorUpdateStatus::Completed
701                         }
702                         Err(e) => {
703                                 log_error!(
704                                         self.logger,
705                                         "error writing channel monitor {}/{}/{} reason: {}",
706                                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
707                                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
708                                         monitor_name.as_str(),
709                                         e
710                                 );
711                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
712                         }
713                 }
714         }
715
716         /// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
717         ///
718         /// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
719         ///
720         ///   - No full monitor is found in [`KVStore`]
721         ///   - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
722         ///   - LDK commands re-persisting the entire monitor through this function, specifically when
723         ///     `update` is `None`.
724         ///   - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
725         fn update_persisted_channel(
726                 &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
727                 monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
728         ) -> chain::ChannelMonitorUpdateStatus {
729                 // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
730                 // ChannelMonitorUpdate's update_id.
731                 if let Some(update) = update {
732                         if update.update_id != CLOSED_CHANNEL_UPDATE_ID
733                                 && update.update_id % self.maximum_pending_updates != 0
734                         {
735                                 let monitor_name = MonitorName::from(funding_txo);
736                                 let update_name = UpdateName::from(update.update_id);
737                                 match self.kv_store.write(
738                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
739                                         monitor_name.as_str(),
740                                         update_name.as_str(),
741                                         &update.encode(),
742                                 ) {
743                                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
744                                         Err(e) => {
745                                                 log_error!(
746                                                         self.logger,
747                                                         "error writing channel monitor update {}/{}/{} reason: {}",
748                                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
749                                                         monitor_name.as_str(),
750                                                         update_name.as_str(),
751                                                         e
752                                                 );
753                                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
754                                         }
755                                 }
756                         } else {
757                                 // We could write this update, but it meets the criteria of our design that call for a full monitor write.
758                                 self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
759                         }
760                 } else {
761                         // There is no update given, so we must persist a new monitor.
762                         self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
763                 }
764         }
765 }
766
767 /// A struct representing a name for a monitor.
768 #[derive(Debug)]
769 struct MonitorName(String);
770
771 impl MonitorName {
772         /// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
773         /// be formed from the given `name`.
774         pub fn new(name: String) -> Result<Self, io::Error> {
775                 MonitorName::do_try_into_outpoint(&name)?;
776                 Ok(Self(name))
777         }
778         /// Convert this monitor name to a str.
779         pub fn as_str(&self) -> &str {
780                 &self.0
781         }
782         /// Attempt to form a valid [`OutPoint`] from a given name string.
783         fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
784                 let mut parts = name.splitn(2, '_');
785                 let txid = if let Some(part) = parts.next() {
786                         Txid::from_hex(part).map_err(|_| {
787                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
788                         })?
789                 } else {
790                         return Err(io::Error::new(
791                                 io::ErrorKind::InvalidData,
792                                 "Stored monitor key is not a splittable string",
793                         ));
794                 };
795                 let index = if let Some(part) = parts.next() {
796                         part.parse().map_err(|_| {
797                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
798                         })?
799                 } else {
800                         return Err(io::Error::new(
801                                 io::ErrorKind::InvalidData,
802                                 "No tx index value found after underscore in stored key",
803                         ));
804                 };
805                 Ok(OutPoint { txid, index })
806         }
807 }
808
809 impl TryFrom<&MonitorName> for OutPoint {
810         type Error = io::Error;
811
812         fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
813                 MonitorName::do_try_into_outpoint(&value.0)
814         }
815 }
816
817 impl From<OutPoint> for MonitorName {
818         fn from(value: OutPoint) -> Self {
819                 MonitorName(format!("{}_{}", value.txid.to_hex(), value.index))
820         }
821 }
822
823 /// A struct representing a name for an update.
824 #[derive(Debug)]
825 struct UpdateName(u64, String);
826
827 impl UpdateName {
828         /// Constructs an [`UpdateName`], after verifying that an update sequence ID
829         /// can be derived from the given `name`.
830         pub fn new(name: String) -> Result<Self, io::Error> {
831                 match name.parse::<u64>() {
832                         Ok(u) => Ok(u.into()),
833                         Err(_) => {
834                                 Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
835                         }
836                 }
837         }
838
839         /// Convert this monitor update name to a &str
840         pub fn as_str(&self) -> &str {
841                 &self.1
842         }
843 }
844
845 impl From<u64> for UpdateName {
846         fn from(value: u64) -> Self {
847                 Self(value, value.to_string())
848         }
849 }
850
851 #[cfg(test)]
852 mod tests {
853         use super::*;
854         use crate::chain::chainmonitor::Persist;
855         use crate::chain::ChannelMonitorUpdateStatus;
856         use crate::events::{ClosureReason, MessageSendEventsProvider};
857         use crate::ln::functional_test_utils::*;
858         use crate::util::test_utils::{self, TestLogger, TestStore};
859         use crate::{check_added_monitors, check_closed_broadcast};
860
861         const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
862
863         #[test]
864         fn converts_u64_to_update_name() {
865                 assert_eq!(UpdateName::from(0).as_str(), "0");
866                 assert_eq!(UpdateName::from(21).as_str(), "21");
867                 assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
868         }
869
870         #[test]
871         fn bad_update_name_fails() {
872                 assert!(UpdateName::new("deadbeef".to_string()).is_err());
873                 assert!(UpdateName::new("-1".to_string()).is_err());
874         }
875
876         #[test]
877         fn monitor_from_outpoint_works() {
878                 let monitor_name1 = MonitorName::from(OutPoint {
879                         txid: Txid::from_hex("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
880                         index: 1,
881                 });
882                 assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
883
884                 let monitor_name2 = MonitorName::from(OutPoint {
885                         txid: Txid::from_hex("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
886                         index: u16::MAX,
887                 });
888                 assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
889         }
890
891         #[test]
892         fn bad_monitor_string_fails() {
893                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
894                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
895                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
896         }
897
898         // Exercise the `MonitorUpdatingPersister` with real channels and payments.
899         #[test]
900         fn persister_with_real_monitors() {
901                 // This value is used later to limit how many iterations we perform.
902                 let test_max_pending_updates = 7;
903                 let chanmon_cfgs = create_chanmon_cfgs(4);
904                 let persister_0 = MonitorUpdatingPersister {
905                         kv_store: &TestStore::new(false),
906                         logger: &TestLogger::new(),
907                         maximum_pending_updates: test_max_pending_updates,
908                         entropy_source: &chanmon_cfgs[0].keys_manager,
909                         signer_provider: &chanmon_cfgs[0].keys_manager,
910                 };
911                 let persister_1 = MonitorUpdatingPersister {
912                         kv_store: &TestStore::new(false),
913                         logger: &TestLogger::new(),
914                         // Intentionally set this to a smaller value to test a different alignment.
915                         maximum_pending_updates: 3,
916                         entropy_source: &chanmon_cfgs[1].keys_manager,
917                         signer_provider: &chanmon_cfgs[1].keys_manager,
918                 };
919                 let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
920                 let chain_mon_0 = test_utils::TestChainMonitor::new(
921                         Some(&chanmon_cfgs[0].chain_source),
922                         &chanmon_cfgs[0].tx_broadcaster,
923                         &chanmon_cfgs[0].logger,
924                         &chanmon_cfgs[0].fee_estimator,
925                         &persister_0,
926                         &chanmon_cfgs[0].keys_manager,
927                 );
928                 let chain_mon_1 = test_utils::TestChainMonitor::new(
929                         Some(&chanmon_cfgs[1].chain_source),
930                         &chanmon_cfgs[1].tx_broadcaster,
931                         &chanmon_cfgs[1].logger,
932                         &chanmon_cfgs[1].fee_estimator,
933                         &persister_1,
934                         &chanmon_cfgs[1].keys_manager,
935                 );
936                 node_cfgs[0].chain_monitor = chain_mon_0;
937                 node_cfgs[1].chain_monitor = chain_mon_1;
938                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
939                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
940
941                 let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
942                 let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;
943
944                 // Check that the persisted channel data is empty before any channels are
945                 // open.
946                 let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
947                         broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
948                 assert_eq!(persisted_chan_data_0.len(), 0);
949                 let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
950                         broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
951                 assert_eq!(persisted_chan_data_1.len(), 0);
952
953                 // Helper to make sure the channel is on the expected update ID.
954                 macro_rules! check_persisted_data {
955                         ($expected_update_id: expr) => {
956                                 persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
957                                         broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
958                                 // check that we stored only one monitor
959                                 assert_eq!(persisted_chan_data_0.len(), 1);
960                                 for (_, mon) in persisted_chan_data_0.iter() {
961                                         // check that when we read it, we got the right update id
962                                         assert_eq!(mon.get_latest_update_id(), $expected_update_id);
963                                         // if the monitor as stored (before applying any updates) is already at the expected update_id, ensure no update files remain
964                                         let monitor_name = MonitorName::from(mon.get_funding_txo().0);
965                                         let (_, cm_0) = persister_0.read_monitor(&monitor_name).unwrap();
966                                         if cm_0.get_latest_update_id() == $expected_update_id {
967                                                 assert_eq!(
968                                                         persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
969                                                                 monitor_name.as_str()).unwrap().len(),
970                                                         0,
971                                                         "updates stored when they shouldn't be in persister 0"
972                                                 );
973                                         }
974                                 }
975                                 persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
976                                         broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
977                                 assert_eq!(persisted_chan_data_1.len(), 1);
978                                 for (_, mon) in persisted_chan_data_1.iter() {
979                                         assert_eq!(mon.get_latest_update_id(), $expected_update_id);
980                                         let monitor_name = MonitorName::from(mon.get_funding_txo().0);
981                                         let (_, cm_1) = persister_1.read_monitor(&monitor_name).unwrap();
982                                         if cm_1.get_latest_update_id() == $expected_update_id {
983                                                 assert_eq!(
984                                                         persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
985                                                                 monitor_name.as_str()).unwrap().len(),
986                                                         0,
987                                                         "updates stored when they shouldn't be in persister 1"
988                                                 );
989                                         }
990                                 }
991                         };
992                 }
993
994                 // Create some initial channel and check that a channel was persisted.
995                 let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
996                 check_persisted_data!(0);
997
998                 // Send a few payments and make sure the monitors are updated to the latest.
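                // Each payment advances both monitors by EXPECTED_UPDATES_PER_PAYMENT update_ids,
                // so the expected update_id below scales with the number of payments sent.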
999                 send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
1000                 check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
1001                 send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
1002                 check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);
1003
1004                 // Send a few more payments to exercise every alignment of maximum_pending_updates
1005                 // against the updates generated by payments sent and received.
1006                 let mut sender = 0;
1007                 for i in 3..=test_max_pending_updates * 2 {
1008                         let receiver;
1009                         if sender == 0 {
1010                                 sender = 1;
1011                                 receiver = 0;
1012                         } else {
1013                                 sender = 0;
1014                                 receiver = 1;
1015                         }
1016                         send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
1017                         check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
1018                 }
1019
1020                 // Force close because cooperative close doesn't result in any persisted
1021                 // updates.
1022                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1023
1024                 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
1025                 check_closed_broadcast!(nodes[0], true);
1026                 check_added_monitors!(nodes[0], 1);
1027
1028                 let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
1029                 assert_eq!(node_txn.len(), 1);
1030
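                // Confirm node 0's commitment transaction on node 1 so it also registers the
                // channel as closed and persists its own final monitor update.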
1031                 connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
1032
1033                 check_closed_broadcast!(nodes[1], true);
1034                 check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
1035                 check_added_monitors!(nodes[1], 1);
1036
1037                 // Make sure everything is persisted as expected after close.
1038                 check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
1039
1040                 // Make sure the expected number of stale updates is present.
1041                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
1042                 let (_, monitor) = &persisted_chan_data[0];
1043                 let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
1044                 // The channel should have 0 updates, as it wrote a full monitor and consolidated.
1045                 assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
1046                 assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
1047         }
1048
1049         // Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to persist
1050         // a monitor or update with it results in the persister returning an UnrecoverableError status.
1051         #[test]
1052         fn unrecoverable_error_on_write_failure() {
1053                 // Set up a dummy channel and force close. This will produce a monitor
1054                 // that we can then use to test persistence.
1055                 let chanmon_cfgs = create_chanmon_cfgs(2);
1056                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1057                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1058                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1059                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
1060                 nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
1061                 check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
1062                 {
1063                         let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
1064                         let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
1065                         let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
1066                         let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
1067                         let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
1068                         let test_txo = OutPoint { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
1069
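                        // This persister is backed by `TestStore::new(true)`, i.e. a store constructed to
                        // fail writes, so every persist attempt below should return `UnrecoverableError`.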
1070                         let ro_persister = MonitorUpdatingPersister {
1071                                 kv_store: &TestStore::new(true),
1072                                 logger: &TestLogger::new(),
1073                                 maximum_pending_updates: 11,
1074                                 entropy_source: node_cfgs[0].keys_manager,
1075                                 signer_provider: node_cfgs[0].keys_manager,
1076                         };
1077                         match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
1078                                 ChannelMonitorUpdateStatus::UnrecoverableError => {
1079                                         // correct result
1080                                 }
1081                                 ChannelMonitorUpdateStatus::Completed => {
1082                                         panic!("Completed persisting new channel when shouldn't have")
1083                                 }
1084                                 ChannelMonitorUpdateStatus::InProgress => {
1085                                         panic!("Returned InProgress when shouldn't have")
1086                                 }
1087                         }
1088                         match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
1089                                 ChannelMonitorUpdateStatus::UnrecoverableError => {
1090                                         // correct result
1091                                 }
1092                                 ChannelMonitorUpdateStatus::Completed => {
1093                                         panic!("Completed updating persisted channel when shouldn't have")
1094                                 }
1095                                 ChannelMonitorUpdateStatus::InProgress => {
1096                                         panic!("Returned InProgress when shouldn't have")
1097                                 }
1098                         }
1099                         added_monitors.clear();
1100                 }
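                // Drain the message events generated by the force close so that end-of-test checks
                // don't flag them as unprocessed.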
1101                 nodes[1].node.get_and_clear_pending_msg_events();
1102         }
1103
1104         // Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
1105         #[test]
1106         fn clean_stale_updates_works() {
1107                 let test_max_pending_updates = 7;
1108                 let chanmon_cfgs = create_chanmon_cfgs(3);
1109                 let persister_0 = MonitorUpdatingPersister {
1110                         kv_store: &TestStore::new(false),
1111                         logger: &TestLogger::new(),
1112                         maximum_pending_updates: test_max_pending_updates,
1113                         entropy_source: &chanmon_cfgs[0].keys_manager,
1114                         signer_provider: &chanmon_cfgs[0].keys_manager,
1115                 };
1116                 let persister_1 = MonitorUpdatingPersister {
1117                         kv_store: &TestStore::new(false),
1118                         logger: &TestLogger::new(),
1119                         maximum_pending_updates: test_max_pending_updates,
1120                         entropy_source: &chanmon_cfgs[1].keys_manager,
1121                         signer_provider: &chanmon_cfgs[1].keys_manager,
1122                 };
1123                 let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1124                 let chain_mon_0 = test_utils::TestChainMonitor::new(
1125                         Some(&chanmon_cfgs[0].chain_source),
1126                         &chanmon_cfgs[0].tx_broadcaster,
1127                         &chanmon_cfgs[0].logger,
1128                         &chanmon_cfgs[0].fee_estimator,
1129                         &persister_0,
1130                         &chanmon_cfgs[0].keys_manager,
1131                 );
1132                 let chain_mon_1 = test_utils::TestChainMonitor::new(
1133                         Some(&chanmon_cfgs[1].chain_source),
1134                         &chanmon_cfgs[1].tx_broadcaster,
1135                         &chanmon_cfgs[1].logger,
1136                         &chanmon_cfgs[1].fee_estimator,
1137                         &persister_1,
1138                         &chanmon_cfgs[1].keys_manager,
1139                 );
1140                 node_cfgs[0].chain_monitor = chain_mon_0;
1141                 node_cfgs[1].chain_monitor = chain_mon_1;
1142                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1143                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1144
1145                 let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
1146
1147                 // Check that the persisted channel data is empty before any channels are
1148                 // open.
1149                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
1150                 assert_eq!(persisted_chan_data.len(), 0);
1151
1152                 // Create some initial channel
1153                 let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
1154
1155                 // Send a few payments to advance the updates a bit
1156                 send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
1157                 send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
1158
1159                 // Get the monitor and make a fake stale update at update_id=1 (the lowest update_id an update can have)
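                // The 1-byte payload is intentionally meaningless: cleanup should only key off the
                // update's name (its update_id) and never needs to deserialize its contents.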
1160                 let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
1161                 let (_, monitor) = &persisted_chan_data[0];
1162                 let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
1163                 persister_0
1164                         .kv_store
1165                         .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
1166                         .unwrap();
1167
1168                 // Do the stale update cleanup
1169                 persister_0.cleanup_stale_updates(false).unwrap();
1170
1171                 // Confirm the stale update is unreadable/gone
1172                 assert!(persister_0
1173                         .kv_store
1174                         .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
1175                         .is_err());
1176
1177                 // Force close.
1178                 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
1179                 check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
1180                 check_closed_broadcast!(nodes[0], true);
1181                 check_added_monitors!(nodes[0], 1);
1182
1183                 // Write an update near u64::MAX
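                // After the force close the monitor is persisted at CLOSED_CHANNEL_UPDATE_ID (u64::MAX),
                // so even an update keyed just below that should be treated as stale and removed.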
1184                 persister_0
1185                         .kv_store
1186                         .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
1187                         .unwrap();
1188
1189                 // Do the stale update cleanup
1190                 persister_0.cleanup_stale_updates(false).unwrap();
1191
1192                 // Confirm the stale update is unreadable/gone
1193                 assert!(persister_0
1194                         .kv_store
1195                         .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
1196                         .is_err());
1197         }
1198 }