f3b8187fc641b88102c808b34d0a76683852e594
[rust-lightning] / lightning / src / util / persist.rs
1 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
2 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
3 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
4 // You may not use this file except in accordance with one or both of these
5 // licenses.
6
7 //! This module contains a simple key-value store trait [`KVStore`] that
8 //! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
9 //! and [`ChannelMonitor`] all in one place.
10
11 use core::cmp;
12 use core::convert::{TryFrom, TryInto};
13 use core::ops::Deref;
14 use bitcoin::hashes::hex::{FromHex, ToHex};
15 use bitcoin::{BlockHash, Txid};
16
17 use crate::{io, log_error};
18 use crate::alloc::string::ToString;
19 use crate::prelude::*;
20
21 use crate::chain;
22 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
23 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
24 use crate::sign::{EntropySource, NodeSigner, WriteableEcdsaChannelSigner, SignerProvider};
25 use crate::chain::transaction::OutPoint;
26 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
27 use crate::ln::channelmanager::ChannelManager;
28 use crate::routing::router::Router;
29 use crate::routing::gossip::NetworkGraph;
30 use crate::routing::scoring::WriteableScore;
31 use crate::util::logger::Logger;
32 use crate::util::ser::{Readable, ReadableArgs, Writeable};
33
34 /// The alphabet of characters allowed for namespaces and keys.
35 pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
36
37 /// The maximum number of characters namespaces and keys may have.
38 pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
39
40 /// The namespace under which the [`ChannelManager`] will be persisted.
41 pub const CHANNEL_MANAGER_PERSISTENCE_NAMESPACE: &str = "";
42 /// The secondary-namespace under which the [`ChannelManager`] will be persisted.
43 pub const CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE: &str = "";
44 /// The key under which the [`ChannelManager`] will be persisted.
45 pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
46
47 /// The namespace under which [`ChannelMonitor`]s will be persisted.
48 pub const CHANNEL_MONITOR_PERSISTENCE_NAMESPACE: &str = "monitors";
49 /// The secondary-namespace under which [`ChannelMonitor`]s will be persisted.
50 pub const CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE: &str = "";
51 /// The namespace under which [`ChannelMonitorUpdate`]s will be persisted.
52 pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE: &str = "monitor_updates";
53
54 /// The namespace under which the [`NetworkGraph`] will be persisted.
55 pub const NETWORK_GRAPH_PERSISTENCE_NAMESPACE: &str = "";
56 /// The secondary-namespace under which the [`NetworkGraph`] will be persisted.
57 pub const NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE: &str = "";
58 /// The key under which the [`NetworkGraph`] will be persisted.
59 pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
60
61 /// The namespace under which the [`WriteableScore`] will be persisted.
62 pub const SCORER_PERSISTENCE_NAMESPACE: &str = "";
63 /// The secondary-namespace under which the [`WriteableScore`] will be persisted.
64 pub const SCORER_PERSISTENCE_SUB_NAMESPACE: &str = "";
65 /// The key under which the [`WriteableScore`] will be persisted.
66 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
67
68 /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
69 ///
70 /// This serves to prevent someone from accidentally loading such monitors (which may need
71 /// updates applied to be current) with another implementation.
72 pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
73
74 /// Provides an interface that allows storage and retrieval of persisted values that are associated
75 /// with given keys.
76 ///
77 /// In order to avoid collisions, the key space is segmented based on the given `primary_namespace`s
78 /// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
79 /// ways, as long as per-namespace key uniqueness is asserted.
80 ///
81 /// Keys and namespaces are required to be valid ASCII strings in the range of
82 /// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
83 /// primary namespaces and secondary namespaces (`""`) are assumed to be valid; however, if
84 /// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
85 /// that concerns should always be separated by primary namespace first, before secondary
86 /// namespaces are used. While the number of primary namespaces will be relatively small and is
87 /// determined at compile time, there may be many secondary namespaces per primary namespace. Note
88 /// that per-namespace uniqueness must hold for keys *and* namespaces alike within any given
89 /// namespace, i.e., conflicts between keys and equally named primary namespaces or secondary
90 /// namespaces must be avoided.
91 ///
92 /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
93 /// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
94 /// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
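///
/// # Example (illustrative)
///
/// The following is a minimal, hypothetical in-memory implementation sketch (`MemoryStore` is not
/// part of this crate); it omits the namespace/key validation against
/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and [`KVSTORE_NAMESPACE_KEY_MAX_LEN`] that a production
/// implementation may wish to enforce:
///
/// ```ignore
/// use std::collections::HashMap;
/// use std::sync::Mutex;
/// use lightning::io::{Error, ErrorKind};
/// use lightning::util::persist::KVStore;
///
/// /// Maps `(primary_namespace, secondary_namespace, key)` to the stored bytes.
/// struct MemoryStore(Mutex<HashMap<(String, String, String), Vec<u8>>>);
///
/// impl KVStore for MemoryStore {
///     fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, Error> {
///         let map = self.0.lock().unwrap();
///         map.get(&(primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()))
///             .cloned()
///             // Returning `NotFound` when (and only when) the key is absent is load-bearing for
///             // consumers such as `MonitorUpdatingPersister`.
///             .ok_or_else(|| Error::new(ErrorKind::NotFound, "key not found"))
///     }
///     fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), Error> {
///         self.0.lock().unwrap().insert(
///             (primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()),
///             buf.to_vec());
///         Ok(())
///     }
///     fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool) -> Result<(), Error> {
///         self.0.lock().unwrap().remove(
///             &(primary_namespace.to_string(), secondary_namespace.to_string(), key.to_string()));
///         Ok(())
///     }
///     fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, Error> {
///         let map = self.0.lock().unwrap();
///         Ok(map.keys()
///             .filter(|(p, s, _)| p.as_str() == primary_namespace && s.as_str() == secondary_namespace)
///             .map(|(_, _, k)| k.clone())
///             .collect())
///     }
/// }
/// ```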
95 pub trait KVStore {
96         /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
97         /// `key`.
98         ///
99         /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
100         /// `primary_namespace` and `secondary_namespace`.
101         ///
102         /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
103         fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
104         /// Persists the given data under the given `key`.
105         ///
106         /// Will create the given `primary_namespace` and `secondary_namespace` if not already present
107         /// in the store.
108         fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
109         /// Removes any data that had previously been persisted under the given `key`.
110         ///
111         /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
112         /// remove the given `key` at some point in time after the method returns, e.g., as part of an
113         /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
114         /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
115         ///
116         /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
117         /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
118         /// potentially get lost on crash after the method returns. Therefore, this flag should only be
119         /// set for `remove` operations that can be safely replayed at a later time.
120         ///
121         /// Returns successfully if no data will be stored for the given `primary_namespace`,
122         /// `secondary_namespace`, and `key`, independently of whether it was present before its
123         /// invocation or not.
124         fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
125         /// Returns a list of keys that are stored under the given `secondary_namespace` in
126         /// `primary_namespace`.
127         ///
128         /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
129         /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
130         fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
131 }
132
133 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
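///
/// Note that any type implementing [`KVStore`] automatically implements this trait via the
/// blanket implementation below. For illustration, a periodic persistence pass might look like
/// the following sketch, assuming `kv_store`, `channel_manager`, `network_graph`, and `scorer`
/// exist in the caller's context (in practice these calls are typically driven by a background
/// processor rather than invoked directly):
///
/// ```ignore
/// kv_store.persist_manager(&channel_manager)?;
/// kv_store.persist_graph(&network_graph)?;
/// kv_store.persist_scorer(&scorer)?;
/// ```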
134 pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
135         where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
136                 T::Target: 'static + BroadcasterInterface,
137                 ES::Target: 'static + EntropySource,
138                 NS::Target: 'static + NodeSigner,
139                 SP::Target: 'static + SignerProvider,
140                 F::Target: 'static + FeeEstimator,
141                 R::Target: 'static + Router,
142                 L::Target: 'static + Logger,
143 {
144         /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
145         fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error>;
146
147         /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
148         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
149
150         /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
151         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
152 }
153
154
155 impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
156         where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
157                 T::Target: 'static + BroadcasterInterface,
158                 ES::Target: 'static + EntropySource,
159                 NS::Target: 'static + NodeSigner,
160                 SP::Target: 'static + SignerProvider,
161                 F::Target: 'static + FeeEstimator,
162                 R::Target: 'static + Router,
163                 L::Target: 'static + Logger,
164 {
165         /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
166         fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
167                 self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
168                                    CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
169                                    CHANNEL_MANAGER_PERSISTENCE_KEY,
170                                    &channel_manager.encode())
171         }
172
173         /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
174         fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
175                 self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
176                                    NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
177                                    NETWORK_GRAPH_PERSISTENCE_KEY,
178                                    &network_graph.encode())
179         }
180
181         /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
182         fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
183                 self.write(SCORER_PERSISTENCE_NAMESPACE,
184                                    SCORER_PERSISTENCE_SUB_NAMESPACE,
185                                    SCORER_PERSISTENCE_KEY,
186                                    &scorer.encode())
187         }
188 }
189
190 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
191         // TODO: We really need a way for the persister to inform the user that it's time to crash/shut
192         // down once these start returning failure.
193         // Then we should return InProgress rather than UnrecoverableError, implying we should probably
194         // just shut down the node since we're not retrying persistence!
195
196         fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
197                 let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
198                 match self.write(
199                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
200                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
201                         &key, &monitor.encode())
202                 {
203                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
204                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
205                 }
206         }
207
208         fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
209                 let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
210                 match self.write(
211                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
212                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
213                         &key, &monitor.encode())
214                 {
215                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
216                         Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
217                 }
218         }
219 }
220
221 /// Read previously persisted [`ChannelMonitor`]s from the store.
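///
/// For illustration, a typical startup read might look like the following sketch, where
/// `kv_store` implements [`KVStore`] and `keys_manager` (e.g. an LDK `KeysManager`) serves as
/// both the [`EntropySource`] and the [`SignerProvider`]; both names are assumed to exist in the
/// caller's context:
///
/// ```ignore
/// let channel_monitors = read_channel_monitors(&kv_store, &keys_manager, &keys_manager)?;
/// for (_block_hash, monitor) in &channel_monitors {
///     // Each entry pairs a block hash with a deserialized `ChannelMonitor`; hand the monitors
///     // back to the chain monitor / channel manager as part of startup.
///     let _funding_outpoint = monitor.get_funding_txo().0;
/// }
/// ```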
222 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
223         kv_store: K, entropy_source: ES, signer_provider: SP,
224 ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
225 where
226         K::Target: KVStore,
227         ES::Target: EntropySource + Sized,
228         SP::Target: SignerProvider + Sized,
229 {
230         let mut res = Vec::new();
231
232         for stored_key in kv_store.list(
233                 CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
234         {
235                 if stored_key.len() < 66 {
236                         return Err(io::Error::new(
237                                 io::ErrorKind::InvalidData,
238                                 "Stored key has invalid length"));
239                 }
240
241                 let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
242                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
243                 })?;
244
245                 let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
246                         io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
247                 })?;
248
249                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
250                         &mut io::Cursor::new(
251                                 kv_store.read(CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE, &stored_key)?),
252                         (&*entropy_source, &*signer_provider),
253                 ) {
254                         Ok((block_hash, channel_monitor)) => {
255                                 if channel_monitor.get_funding_txo().0.txid != txid
256                                         || channel_monitor.get_funding_txo().0.index != index
257                                 {
258                                         return Err(io::Error::new(
259                                                 io::ErrorKind::InvalidData,
260                                                 "ChannelMonitor was stored under the wrong key",
261                                         ));
262                                 }
263                                 res.push((block_hash, channel_monitor));
264                         }
265                         Err(_) => {
266                                 return Err(io::Error::new(
267                                         io::ErrorKind::InvalidData,
268                                         "Failed to read ChannelMonitor"
269                                 ))
270                         }
271                 }
272         }
273         Ok(res)
274 }
275
276 /// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
277 /// [`ChannelMonitorUpdate`]s.
278 ///
279 /// # Overview
280 ///
281 /// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
282 /// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
283 /// deleting) and complexity. This is because it writes channel monitor differential updates,
284 /// whereas the other (default) implementation rewrites the entire monitor on each update. For
285 /// routing nodes, updates to a channel can happen many times per second, and monitors can be tens
286 /// of megabytes (or more). Updates can be as small as a few hundred bytes.
287 ///
288 /// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
289 /// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
290 /// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
291 /// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
292 /// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
293 /// sentinel bytes.
294 ///
295 /// # Storing monitors
296 ///
297 /// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
298 ///
299 ///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
300 ///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
301 ///
302 /// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_NAMESPACE`], using the
303 /// familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
304 ///
305 /// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
306 ///
307 ///   - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE`]
308 ///   - secondary namespace: [the monitor's encoded outpoint name]
309 ///
310 /// Under that secondary namespace, each update is stored with a number string, like `21`, which
311 /// represents its `update_id` value.
312 ///
313 /// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
314 ///
315 ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
316 ///   - Index: `1`
317 ///
318 /// Full channel monitors would be stored at a single key:
319 ///
320 /// `[CHANNEL_MONITOR_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
321 ///
322 /// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
323 ///
324 /// ```text
325 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
326 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
327 /// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
328 /// ```
329 /// ... and so on.
330 ///
331 /// # Reading channel state from storage
332 ///
333 /// Channel state can be reconstructed by calling
334 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
335 /// list channel monitors themselves and load channels individually using
336 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
337 /// 
338 /// ## EXTREMELY IMPORTANT
339 /// 
340 /// It is extremely important that your [`KVStore::read`] implementation uses the
341 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
342 /// that circumstance (not when there is really a permissions error, for example). This is because
343 /// neither channel monitor reading function lists updates. Instead, each reads the monitor and,
344 /// using its stored `update_id`, synthesizes update storage keys and tries them in sequence until
345 /// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
346 ///
347 /// # Pruning stale channel updates
348 ///
349 /// Stale updates are pruned when a full monitor is written. The old monitor is first read, and if
350 /// that succeeds, updates in the range between the old and new monitors are deleted. The `lazy`
351 /// flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
352 /// will complete. However, stale updates are not a problem for data integrity, since only updates
353 /// with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
354 ///
355 /// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
356 /// would like to get rid of them, consider using the
357 /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
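///
/// # Example (illustrative)
///
/// A sketch of construction and startup use, assuming `kv_store`, `logger`, `keys_manager`,
/// `broadcaster`, and `fee_estimator` already exist in the caller's context:
///
/// ```ignore
/// // Allow up to 100 pending `ChannelMonitorUpdate`s per channel before consolidating them into
/// // a full `ChannelMonitor` write.
/// let persister = MonitorUpdatingPersister::new(
///     &kv_store, &logger, 100, &keys_manager, &keys_manager);
///
/// // On startup, reconstruct each channel's state by reading the last full monitor and
/// // replaying any stored updates on top of it.
/// let channel_monitors =
///     persister.read_all_channel_monitors_with_updates(&broadcaster, &fee_estimator)?;
///
/// // Optionally, clean up any stale updates left behind by lazy deletes (e.g. after a crash).
/// persister.cleanup_stale_updates(true)?;
/// ```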
358 pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
359 where
360         K::Target: KVStore,
361         L::Target: Logger,
362         ES::Target: EntropySource + Sized,
363         SP::Target: SignerProvider + Sized,
364 {
365         kv_store: K,
366         logger: L,
367         maximum_pending_updates: u64,
368         entropy_source: ES,
369         signer_provider: SP,
370 }
371
372 #[allow(dead_code)]
373 impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
374         MonitorUpdatingPersister<K, L, ES, SP>
375 where
376         K::Target: KVStore,
377         L::Target: Logger,
378         ES::Target: EntropySource + Sized,
379         SP::Target: SignerProvider + Sized,
380 {
381         /// Constructs a new [`MonitorUpdatingPersister`].
382         ///
383         /// The `maximum_pending_updates` parameter controls how many updates may be stored before a
384         /// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
385         /// consolidation will frequently occur with fewer updates than what you set here; this number
386         /// is merely the maximum that may be stored. When setting this value, consider that for higher
387         /// values of `maximum_pending_updates`:
388         /// 
389         ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
390         /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
391         /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
392         ///   - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
393         /// "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
394         /// less frequent "waves."
395         ///   - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
396         /// [`MonitorUpdatingPersister::cleanup_stale_updates`].
397         pub fn new(
398                 kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
399                 signer_provider: SP,
400         ) -> Self
401         where
402                 ES::Target: EntropySource + Sized,
403                 SP::Target: SignerProvider + Sized,
404         {
405                 MonitorUpdatingPersister {
406                         kv_store,
407                         logger,
408                         maximum_pending_updates,
409                         entropy_source,
410                         signer_provider,
411                 }
412         }
413
414         /// Reads all stored channel monitors, along with any stored updates for them.
415         ///
416         /// It is extremely important that your [`KVStore::read`] implementation uses the
417         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
418         /// documentation for [`MonitorUpdatingPersister`].
419         pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref + Clone>(
420                 &self, broadcaster: B, fee_estimator: F,
421         ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
422         where
423                 ES::Target: EntropySource + Sized,
424                 SP::Target: SignerProvider + Sized,
425                 B::Target: BroadcasterInterface,
426                 F::Target: FeeEstimator,
427         {
428                 let monitor_list = self.kv_store.list(
429                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
430                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
431                 )?;
432                 let mut res = Vec::with_capacity(monitor_list.len());
433                 for monitor_key in monitor_list {
434                         res.push(self.read_channel_monitor_with_updates(
435                                 &broadcaster,
436                                 fee_estimator.clone(),
437                                 monitor_key,
438                         )?)
439                 }
440                 Ok(res)
441         }
442
443         /// Read a single channel monitor, along with any stored updates for it.
444         ///
445         /// It is extremely important that your [`KVStore::read`] implementation uses the
446         /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
447         /// documentation for [`MonitorUpdatingPersister`].
448         ///
449         /// For `monitor_key`, channel storage keys are the channel's transaction ID and index, or
450         /// [`OutPoint`], with an underscore `_` between them. For example, given:
451         ///
452         ///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
453         ///   - Index: `1`
454         ///
455         /// The correct `monitor_key` would be:
456         /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
457         /// 
458         /// Loading a large number of monitors will be faster if done in parallel. You can use this
459         /// function to accomplish this. Take care to limit the number of parallel readers.
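        ///
        /// For illustration, a sequential version of such a loop might look like the following
        /// sketch (dispatching the per-key reads to worker threads is left to the caller);
        /// `persister`, `kv_store`, `broadcaster`, and `fee_estimator` are assumed to exist in the
        /// caller's context:
        ///
        /// ```ignore
        /// // List the stored monitors directly from the `KVStore`, then load each one individually.
        /// let monitor_keys = kv_store.list(
        ///     CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?;
        /// let mut channel_monitors = Vec::with_capacity(monitor_keys.len());
        /// for monitor_key in monitor_keys {
        ///     // Each call below is independent of the others, so they may also be run in parallel.
        ///     channel_monitors.push(persister.read_channel_monitor_with_updates(
        ///         &broadcaster, &fee_estimator, monitor_key)?);
        /// }
        /// ```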
460         pub fn read_channel_monitor_with_updates<B: Deref, F: Deref + Clone>(
461                 &self, broadcaster: &B, fee_estimator: F, monitor_key: String,
462         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
463         where
464                 ES::Target: EntropySource + Sized,
465                 SP::Target: SignerProvider + Sized,
466                 B::Target: BroadcasterInterface,
467                 F::Target: FeeEstimator,
468         {
469                 let monitor_name = MonitorName::new(monitor_key)?;
470                 let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
471                 let mut current_update_id = monitor.get_latest_update_id();
472                 loop {
473                         current_update_id = match current_update_id.checked_add(1) {
474                                 Some(next_update_id) => next_update_id,
475                                 None => break,
476                         };
477                         let update_name = UpdateName::from(current_update_id);
478                         let update = match self.read_monitor_update(&monitor_name, &update_name) {
479                                 Ok(update) => update,
480                                 Err(err) if err.kind() == io::ErrorKind::NotFound => {
481                                         // We can't find any more updates, so we are done.
482                                         break;
483                                 }
484                                 Err(err) => return Err(err),
485                         };
486
487                         monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
488                                 .map_err(|e| {
489                                         log_error!(
490                                                 self.logger,
491                                                 "Monitor update failed. monitor: {} update: {} reason: {:?}",
492                                                 monitor_name.as_str(),
493                                                 update_name.as_str(),
494                                                 e
495                                         );
496                                         io::Error::new(io::ErrorKind::Other, "Monitor update failed")
497                                 })?;
498                 }
499                 Ok((block_hash, monitor))
500         }
501
502         /// Read a channel monitor.
503         fn read_monitor(
504                 &self, monitor_name: &MonitorName,
505         ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error> {
506                 let outpoint: OutPoint = monitor_name.try_into()?;
507                 let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
508                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
509                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
510                         monitor_name.as_str(),
511                 )?);
512                 // Discard the sentinel bytes if found.
513                 if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
514                         monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
515                 }
516                 match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
517                         &mut monitor_cursor,
518                         (&*self.entropy_source, &*self.signer_provider),
519                 ) {
520                         Ok((blockhash, channel_monitor)) => {
521                                 if channel_monitor.get_funding_txo().0.txid != outpoint.txid
522                                         || channel_monitor.get_funding_txo().0.index != outpoint.index
523                                 {
524                                         log_error!(
525                                                 self.logger,
526                                                 "ChannelMonitor {} was stored under the wrong key!",
527                                                 monitor_name.as_str()
528                                         );
529                                         Err(io::Error::new(
530                                                 io::ErrorKind::InvalidData,
531                                                 "ChannelMonitor was stored under the wrong key",
532                                         ))
533                                 } else {
534                                         Ok((blockhash, channel_monitor))
535                                 }
536                         }
537                         Err(e) => {
538                                 log_error!(
539                                         self.logger,
540                                         "Failed to read ChannelMonitor {}, reason: {}",
541                                         monitor_name.as_str(),
542                                         e,
543                                 );
544                                 Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
545                         }
546                 }
547         }
548
549         /// Read a channel monitor update.
550         fn read_monitor_update(
551                 &self, monitor_name: &MonitorName, update_name: &UpdateName,
552         ) -> Result<ChannelMonitorUpdate, io::Error> {
553                 let update_bytes = self.kv_store.read(
554                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
555                         monitor_name.as_str(),
556                         update_name.as_str(),
557                 )?;
558                 ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
559                         log_error!(
560                                 self.logger,
561                                 "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
562                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
563                                 monitor_name.as_str(),
564                                 update_name.as_str(),
565                                 e,
566                         );
567                         io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
568                 })
569         }
570
571         /// Cleans up stale updates for all monitors.
572         ///
573         /// This function works by first listing all monitors, and then for each of them, listing all
574         /// updates. The updates that have an `update_id` less than or equal to that of the stored monitor
575         /// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
576         /// be passed to [`KVStore::remove`].
577         pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
578                 let monitor_keys = self.kv_store.list(
579                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
580                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
581                 )?;
582                 for monitor_key in monitor_keys {
583                         let monitor_name = MonitorName::new(monitor_key)?;
584                         let (_, current_monitor) = self.read_monitor(&monitor_name)?;
585                         let updates = self
586                                 .kv_store
587                                 .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str())?;
588                         for update in updates {
589                                 let update_name = UpdateName::new(update)?;
590                                 // if the update_id is lower than the stored monitor, delete
591                                 if update_name.0 <= current_monitor.get_latest_update_id() {
592                                         self.kv_store.remove(
593                                                 CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
594                                                 monitor_name.as_str(),
595                                                 update_name.as_str(),
596                                                 lazy,
597                                         )?;
598                                 }
599                         }
600                 }
601                 Ok(())
602         }
603 }
604
605 impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref> 
606         Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
607 where
608         K::Target: KVStore,
609         L::Target: Logger,
610         ES::Target: EntropySource + Sized,
611         SP::Target: SignerProvider + Sized,
612 {
613         /// Persists a new channel. This means writing the entire monitor to the
614         /// parameterized [`KVStore`].
615         fn persist_new_channel(
616                 &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
617                 _monitor_update_call_id: MonitorUpdateId,
618         ) -> chain::ChannelMonitorUpdateStatus {
619                 // Determine the proper key for this monitor
620                 let monitor_name = MonitorName::from(funding_txo);
621                 let maybe_old_monitor = self.read_monitor(&monitor_name);
622                 match maybe_old_monitor {
623                         Ok((_, ref old_monitor)) => {
624                                 // Check that this key isn't already storing a monitor with a higher update_id
625                                 // (collision)
626                                 if old_monitor.get_latest_update_id() > monitor.get_latest_update_id() {
627                                         log_error!(
628                                                 self.logger,
629                                                 "Tried to write a monitor at the same outpoint {} with a lower update_id than the one already stored!",
630                                                 monitor_name.as_str()
631                                         );
632                                         return chain::ChannelMonitorUpdateStatus::UnrecoverableError;
633                                 }
634                         }
635                         // This means the channel monitor is new.
636                         Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
637                         _ => return chain::ChannelMonitorUpdateStatus::UnrecoverableError,
638                 }
639                 // Serialize and write the new monitor
640                 let mut monitor_bytes = Vec::with_capacity(
641                         MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
642                 );
643                 monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
644                 monitor.write(&mut monitor_bytes).unwrap();
645                 match self.kv_store.write(
646                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
647                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
648                         monitor_name.as_str(),
649                         &monitor_bytes,
650                 ) {
651                         Ok(_) => {
652                                 // Assess cleanup. Typically, we'll clean up only between the last two known full
653                                 // monitors.
654                                 if let Ok((_, old_monitor)) = maybe_old_monitor {
655                                         let start = old_monitor.get_latest_update_id();
656                                         let end = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
657                                                 // We don't want to sweep the entire rest of the u64 range, so just clean up the
658                                                 // possible pending updates. Note that we never write updates at
659                                                 // `CLOSED_CHANNEL_UPDATE_ID`.
660                                                 cmp::min(
661                                                         start.saturating_add(self.maximum_pending_updates),
662                                                         CLOSED_CHANNEL_UPDATE_ID - 1,
663                                                 )
664                                         } else {
665                                                 monitor.get_latest_update_id().saturating_sub(1)
666                                         };
667                                         // We should bother cleaning up only if there's at least one update
668                                         // expected.
669                                         for update_id in start..=end {
670                                                 let update_name = UpdateName::from(update_id);
671                                                 #[cfg(debug_assertions)]
672                                                 {
673                                                         if let Ok(update) =
674                                                                 self.read_monitor_update(&monitor_name, &update_name)
675                                                         {
676                                                                 // Assert that we are reading what we think we are.
677                                                                 debug_assert_eq!(update.update_id, update_name.0);
678                                                         } else if update_id != start && monitor.get_latest_update_id() != CLOSED_CHANNEL_UPDATE_ID
679                                                         {
680                                                                 // We're deleting something we should know doesn't exist.
681                                                                 panic!(
682                                                                         "failed to read monitor update {}",
683                                                                         update_name.as_str()
684                                                                 );
685                                                         }
686                                                         // On closed channels, we will unavoidably try to read
687                                                         // non-existent updates since we have to guess at the range of
688                                                         // stale updates, so do nothing.
689                                                 }
690                                                 if let Err(e) = self.kv_store.remove(
691                                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
692                                                         monitor_name.as_str(),
693                                                         update_name.as_str(),
694                                                         true,
695                                                 ) {
696                                                         log_error!(
697                                                                 self.logger,
698                                                                 "error cleaning up channel monitor updates for monitor {}, reason: {}",
699                                                                 monitor_name.as_str(),
700                                                                 e
701                                                         );
702                                                 };
703                                         }
704                                 };
705                                 chain::ChannelMonitorUpdateStatus::Completed
706                         }
707                         Err(e) => {
708                                 log_error!(
709                                         self.logger,
710                                         "error writing channel monitor {}/{}/{} reason: {}",
711                                         CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
712                                         CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
713                                         monitor_name.as_str(),
714                                         e
715                                 );
716                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
717                         }
718                 }
719         }
720
721         /// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
722         ///
723         /// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
724         ///
725         ///   - No full monitor is found in [`KVStore`]
726         ///   - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
727         ///   - LDK commands re-persisting the entire monitor through this function, specifically when
728         ///     `update` is `None`.
729         ///   - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
730         fn update_persisted_channel(
731                 &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
732                 monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
733         ) -> chain::ChannelMonitorUpdateStatus {
734                 // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
735                 // ChannelMonitorUpdate's update_id.
736                 if let Some(update) = update {
737                         if update.update_id != CLOSED_CHANNEL_UPDATE_ID
738                                 && update.update_id % self.maximum_pending_updates != 0
739                         {
740                                 let monitor_name = MonitorName::from(funding_txo);
741                                 let update_name = UpdateName::from(update.update_id);
742                                 match self.kv_store.write(
743                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
744                                         monitor_name.as_str(),
745                                         update_name.as_str(),
746                                         &update.encode(),
747                                 ) {
748                                         Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
749                                         Err(e) => {
750                                                 log_error!(
751                                                         self.logger,
752                                                         "error writing channel monitor update {}/{}/{} reason: {}",
753                                                         CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
754                                                         monitor_name.as_str(),
755                                                         update_name.as_str(),
756                                                         e
757                                                 );
758                                                 chain::ChannelMonitorUpdateStatus::UnrecoverableError
759                                         }
760                                 }
761                         } else {
762                                 // We could write this update, but it meets the criteria of our design that call for a full monitor write.
763                                 self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
764                         }
765                 } else {
766                         // There is no update given, so we must persist a new monitor.
767                         self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
768                 }
769         }
770 }
771
772 /// A struct representing a name for a monitor.
773 #[derive(Debug)]
774 struct MonitorName(String);
775
776 impl MonitorName {
777         /// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
778         /// be formed from the given `name`.
779         pub fn new(name: String) -> Result<Self, io::Error> {
780                 MonitorName::do_try_into_outpoint(&name)?;
781                 Ok(Self(name))
782         }
783         /// Convert this monitor name to a str.
784         pub fn as_str(&self) -> &str {
785                 &self.0
786         }
787         /// Attempt to form a valid [`OutPoint`] from a given name string.
788         fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
789                 let mut parts = name.splitn(2, '_');
790                 let txid = if let Some(part) = parts.next() {
791                         Txid::from_hex(part).map_err(|_| {
792                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
793                         })?
794                 } else {
795                         return Err(io::Error::new(
796                                 io::ErrorKind::InvalidData,
797                                 "Stored monitor key is not a splittable string",
798                         ));
799                 };
800                 let index = if let Some(part) = parts.next() {
801                         part.parse().map_err(|_| {
802                                 io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
803                         })?
804                 } else {
805                         return Err(io::Error::new(
806                                 io::ErrorKind::InvalidData,
807                                 "No tx index value found after underscore in stored key",
808                         ));
809                 };
810                 Ok(OutPoint { txid, index })
811         }
812 }
813
814 impl TryFrom<&MonitorName> for OutPoint {
815         type Error = io::Error;
816
817         fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
818                 MonitorName::do_try_into_outpoint(&value.0)
819         }
820 }
821
822 impl From<OutPoint> for MonitorName {
823         fn from(value: OutPoint) -> Self {
824                 MonitorName(format!("{}_{}", value.txid.to_hex(), value.index))
825         }
826 }
827
828 /// A struct representing a name for an update.
829 #[derive(Debug)]
830 struct UpdateName(u64, String);
831
832 impl UpdateName {
833         /// Constructs an [`UpdateName`], after verifying that an update sequence ID
834         /// can be derived from the given `name`.
835         pub fn new(name: String) -> Result<Self, io::Error> {
836                 match name.parse::<u64>() {
837                         Ok(u) => Ok(u.into()),
838                         Err(_) => {
839                                 Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
840                         }
841                 }
842         }
843
844         /// Convert this monitor update name to a &str
845         pub fn as_str(&self) -> &str {
846                 &self.1
847         }
848 }
849
850 impl From<u64> for UpdateName {
851         fn from(value: u64) -> Self {
852                 Self(value, value.to_string())
853         }
854 }
855
856 #[cfg(test)]
857 mod tests {
858         use super::*;
859         use crate::chain::chainmonitor::Persist;
860         use crate::chain::ChannelMonitorUpdateStatus;
861         use crate::events::{ClosureReason, MessageSendEventsProvider};
862         use crate::ln::functional_test_utils::*;
863         use crate::util::test_utils::{self, TestLogger, TestStore};
864         use crate::{check_added_monitors, check_closed_broadcast};
865
866         const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
867
868         #[test]
869         fn converts_u64_to_update_name() {
870                 assert_eq!(UpdateName::from(0).as_str(), "0");
871                 assert_eq!(UpdateName::from(21).as_str(), "21");
872                 assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
873         }
874
875         #[test]
876         fn bad_update_name_fails() {
877                 assert!(UpdateName::new("deadbeef".to_string()).is_err());
878                 assert!(UpdateName::new("-1".to_string()).is_err());
879         }
880
881         #[test]
882         fn monitor_from_outpoint_works() {
883                 let monitor_name1 = MonitorName::from(OutPoint {
884                         txid: Txid::from_hex("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
885                         index: 1,
886                 });
887                 assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
888
889                 let monitor_name2 = MonitorName::from(OutPoint {
890                         txid: Txid::from_hex("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
891                         index: u16::MAX,
892                 });
893                 assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
894         }
895
896         #[test]
897         fn bad_monitor_string_fails() {
898                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
899                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
900                 assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
901         }
902
903         // Exercise the `MonitorUpdatingPersister` with real channels and payments.
904         #[test]
905         fn persister_with_real_monitors() {
906                 // This value is used later to limit how many iterations we perform.
907                 let test_max_pending_updates = 7;
908                 let chanmon_cfgs = create_chanmon_cfgs(4);
909                 let persister_0 = MonitorUpdatingPersister {
910                         kv_store: &TestStore::new(false),
911                         logger: &TestLogger::new(),
912                         maximum_pending_updates: test_max_pending_updates,
913                         entropy_source: &chanmon_cfgs[0].keys_manager,
914                         signer_provider: &chanmon_cfgs[0].keys_manager,
915                 };
916                 let persister_1 = MonitorUpdatingPersister {
917                         kv_store: &TestStore::new(false),
918                         logger: &TestLogger::new(),
919                         // Intentionally set this to a smaller value to test a different alignment.
920                         maximum_pending_updates: 3,
921                         entropy_source: &chanmon_cfgs[1].keys_manager,
922                         signer_provider: &chanmon_cfgs[1].keys_manager,
923                 };
924                 let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
925                 let chain_mon_0 = test_utils::TestChainMonitor::new(
926                         Some(&chanmon_cfgs[0].chain_source),
927                         &chanmon_cfgs[0].tx_broadcaster,
928                         &chanmon_cfgs[0].logger,
929                         &chanmon_cfgs[0].fee_estimator,
930                         &persister_0,
931                         &chanmon_cfgs[0].keys_manager,
932                 );
933                 let chain_mon_1 = test_utils::TestChainMonitor::new(
934                         Some(&chanmon_cfgs[1].chain_source),
935                         &chanmon_cfgs[1].tx_broadcaster,
936                         &chanmon_cfgs[1].logger,
937                         &chanmon_cfgs[1].fee_estimator,
938                         &persister_1,
939                         &chanmon_cfgs[1].keys_manager,
940                 );
941                 node_cfgs[0].chain_monitor = chain_mon_0;
942                 node_cfgs[1].chain_monitor = chain_mon_1;
943                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
944                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
945
		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
		let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
			broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_0.len(), 0);
		let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
			broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_1.len(), 0);

		// Helper to make sure the channel is on the expected update ID.
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
					broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
				// check that we stored only one monitor
				assert_eq!(persisted_chan_data_0.len(), 1);
				for (_, mon) in persisted_chan_data_0.iter() {
					// check that when we read it, we got the right update id
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
					// If the monitor read back without applying any updates is already at the
					// expected update id, ensure no pending updates are stored for it.
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					let (_, cm_0) = persister_0.read_monitor(&monitor_name).unwrap();
					if cm_0.get_latest_update_id() == $expected_update_id {
						assert_eq!(
							persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 0"
						);
					}
				}
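				// Run the same checks against the second persister, whose maximum_pending_updates
				// differs to exercise a different alignment.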
				persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
					broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
				assert_eq!(persisted_chan_data_1.len(), 1);
				for (_, mon) in persisted_chan_data_1.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					let (_, cm_1) = persister_1.read_monitor(&monitor_name).unwrap();
					if cm_1.get_latest_update_id() == $expected_update_id {
						assert_eq!(
							persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 1"
						);
					}
				}
			};
		}

		// Create an initial channel and check that its monitor was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
		check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);

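		// Each payment results in EXPECTED_UPDATES_PER_PAYMENT monitor updates on each node, so
		// after the i-th payment both monitors should sit at update id i * EXPECTED_UPDATES_PER_PAYMENT.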
		// Send a few more payments to try all the alignments of max pending updates with
		// updates for a payment sent and received.
		let mut sender = 0;
		for i in 3..=test_max_pending_updates * 2 {
			let receiver;
			if sender == 0 {
				sender = 1;
				receiver = 0;
			} else {
				sender = 0;
				receiver = 1;
			}
			send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
			check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
		}

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();

		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(node_txn.len(), 1);

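		// Confirm node 0's commitment transaction on node 1's chain so node 1 also sees the
		// channel as closed on chain.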
		connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));

		check_closed_broadcast!(nodes[1], true);
		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);

		// Make sure the expected number of stale updates is present.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		// The channel should have 0 updates, as it wrote a full monitor and consolidated.
		assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
		assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
	}

	// Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to persist a
	// monitor or update with it results in the persister returning an UnrecoverableError status.
	#[test]
	fn unrecoverable_error_on_write_failure() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
		{
			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
			let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
			let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
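			// An arbitrary outpoint, used only as the key under which the monitor/update would be stored.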
			let test_txo = OutPoint { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

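			// The `true` passed to `TestStore::new` makes all writes fail, so each persist call
			// below should return `UnrecoverableError`.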
			let ro_persister = MonitorUpdatingPersister {
				kv_store: &TestStore::new(true),
				logger: &TestLogger::new(),
				maximum_pending_updates: 11,
				entropy_source: node_cfgs[0].keys_manager,
				signer_provider: node_cfgs[0].keys_manager,
			};
			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// correct result
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting new channel when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// correct result
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting channel update when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			added_monitors.clear();
		}
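		// Clear any message events the force close generated so the test teardown's
		// pending-event checks pass.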
		nodes[1].node.get_and_clear_pending_msg_events();
	}

	// Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
	#[test]
	fn clean_stale_updates_works() {
		let test_max_pending_updates = 7;
		let chanmon_cfgs = create_chanmon_cfgs(3);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data.len(), 0);

		// Create an initial channel.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);

		// Send a few payments to advance the updates a bit.
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);

		// Get the monitor and write a fake stale update at update_id=1 (the lowest update_id an update can have).
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
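		// The contents of the stored blob don't matter; cleanup only looks at the update's name
		// relative to the persisted monitor's latest update id.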
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
			.is_err());

		// Force close.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

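		// After the force close the persisted monitor sits at CLOSED_CHANNEL_UPDATE_ID (u64::MAX),
		// so an update stored at u64::MAX - 1 is stale and should be removed by cleanup.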
		// Write an update near u64::MAX
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
			.is_err());
	}
}