// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! This module contains a simple key-value store trait [`KVStore`] that
//! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
//! and [`ChannelMonitor`] all in one place.
//!
//! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager

use core::cmp;
use core::convert::{TryFrom, TryInto};
use core::ops::Deref;
use core::str::FromStr;
use bitcoin::{BlockHash, Txid};

use crate::{io, log_error};
use crate::prelude::*;

use crate::chain;
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
use crate::chain::transaction::OutPoint;
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
use crate::ln::channelmanager::AChannelManager;
use crate::routing::gossip::NetworkGraph;
use crate::routing::scoring::WriteableScore;
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, Writeable};

/// The alphabet of characters allowed for namespaces and keys.
pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";

/// The maximum number of characters namespaces and keys may have.
pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;

/// The primary namespace under which the [`ChannelManager`] will be persisted.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
/// The secondary namespace under which the [`ChannelManager`] will be persisted.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The key under which the [`ChannelManager`] will be persisted.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";

/// The primary namespace under which [`ChannelMonitor`]s will be persisted.
pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
/// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";

/// The primary namespace under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
/// The secondary namespace under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The key under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";

/// The primary namespace under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
/// The secondary namespace under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
/// The key under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_KEY: &str = "scorer";

/// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
///
/// This serves to prevent someone from accidentally loading such monitors (which may need
/// updates applied to be current) with another implementation.
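///
/// A sketch of how reading code may detect and strip the sentinel (`buf` is an assumed
/// `Vec<u8>` read from a [`KVStore`]); [`MonitorUpdatingPersister`] does the equivalent
/// internally before deserializing a monitor:
///
/// ```ignore
/// let body = if buf.starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
///     &buf[MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len()..]
/// } else {
///     &buf[..]
/// };
/// ```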
pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];

/// Provides an interface that allows storage and retrieval of persisted values that are associated
/// with given keys.
///
/// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s
/// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
/// ways, as long as per-namespace key uniqueness is asserted.
///
/// Keys and namespaces are required to be valid ASCII strings in the range of
/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
/// primary namespaces and secondary namespaces (`""`) are assumed to be valid; however, if
/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
/// that concerns should always be separated by primary namespace first, before secondary
/// namespaces are used. While the number of primary namespaces will be relatively small and is
/// determined at compile time, there may be many secondary namespaces per primary namespace. Note
/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given
/// namespace, i.e., conflicts between keys and equally named
/// primary namespaces/secondary namespaces must be avoided.
///
/// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
/// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
/// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
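///
/// A minimal in-memory implementation sketch, illustrating the data model (this is not part
/// of LDK and assumes `std`; a real backend must also enforce the
/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and [`KVSTORE_NAMESPACE_KEY_MAX_LEN`] constraints and
/// durably persist writes):
///
/// ```ignore
/// use std::collections::HashMap;
/// use std::sync::Mutex;
///
/// struct MemoryStore {
///     // Entries are keyed by (primary_namespace, secondary_namespace, key).
///     entries: Mutex<HashMap<(String, String, String), Vec<u8>>>,
/// }
///
/// impl KVStore for MemoryStore {
///     fn read(&self, pns: &str, sns: &str, key: &str) -> Result<Vec<u8>, io::Error> {
///         self.entries.lock().unwrap()
///             .get(&(pns.to_string(), sns.to_string(), key.to_string()))
///             .cloned()
///             // Returning NotFound for missing keys (and only then) is load-bearing,
///             // see the `MonitorUpdatingPersister` documentation below.
///             .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
///     }
///     fn write(&self, pns: &str, sns: &str, key: &str, buf: &[u8]) -> Result<(), io::Error> {
///         self.entries.lock().unwrap()
///             .insert((pns.to_string(), sns.to_string(), key.to_string()), buf.to_vec());
///         Ok(())
///     }
///     fn remove(&self, pns: &str, sns: &str, key: &str, _lazy: bool) -> Result<(), io::Error> {
///         self.entries.lock().unwrap()
///             .remove(&(pns.to_string(), sns.to_string(), key.to_string()));
///         Ok(())
///     }
///     fn list(&self, pns: &str, sns: &str) -> Result<Vec<String>, io::Error> {
///         Ok(self.entries.lock().unwrap().keys()
///             .filter(|(p, s, _)| p.as_str() == pns && s.as_str() == sns)
///             .map(|(_, _, k)| k.clone()).collect())
///     }
/// }
/// ```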
pub trait KVStore {
	/// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
	/// `key`.
	///
	/// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
	/// `primary_namespace` and `secondary_namespace`.
	///
	/// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
	fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
	/// Persists the given data under the given `key`.
	///
	/// Will create the given `primary_namespace` and `secondary_namespace` if not already present
	/// in the store.
	fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
	/// Removes any data that had previously been persisted under the given `key`.
	///
	/// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
	/// remove the given `key` at some point in time after the method returns, e.g., as part of an
	/// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
	/// [`KVStore::list`] might include the removed key until the changes are actually persisted.
	///
	/// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
	/// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
	/// potentially get lost on crash after the method returns. Therefore, this flag should only be
	/// set for `remove` operations that can be safely replayed at a later time.
	///
	/// Returns successfully if no data will be stored for the given `primary_namespace`,
	/// `secondary_namespace`, and `key`, independently of whether it was present before its
	/// invocation or not.
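	///
	/// A sketch of an appropriate use of `lazy` (`store`, `monitor_name`, and `update_name`
	/// are assumed bindings): deleting stale monitor updates is safely replayable, so a lazy
	/// remove is fine there.
	///
	/// ```ignore
	/// store.remove(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
	///     monitor_name, update_name, true)?;
	/// ```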
	fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
	/// Returns a list of keys that are stored under the given `secondary_namespace` in
	/// `primary_namespace`.
	///
	/// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
	/// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
	fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
}

/// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
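///
/// A hedged usage sketch: every [`KVStore`] receives this trait through a blanket
/// implementation (see below), so manual persistence outside of a background processor
/// could look like this (`kv_store`, `channel_manager`, `network_graph`, and `scorer`
/// are assumed bindings):
///
/// ```ignore
/// kv_store.persist_manager(&channel_manager)?;
/// kv_store.persist_graph(&network_graph)?;
/// kv_store.persist_scorer(&scorer)?;
/// ```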
pub trait Persister<'a, CM: Deref, L: Deref, S: WriteableScore<'a>>
where
	CM::Target: 'static + AChannelManager,
	L::Target: 'static + Logger,
{
	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
	///
	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
	fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error>;

	/// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
	fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;

	/// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
	fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
}

impl<'a, A: KVStore + ?Sized, CM: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, CM, L, S> for A
where
	CM::Target: 'static + AChannelManager,
	L::Target: 'static + Logger,
{
	fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> {
		self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
			CHANNEL_MANAGER_PERSISTENCE_KEY,
			&channel_manager.get_cm().encode())
	}

	fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
		self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
			NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
			NETWORK_GRAPH_PERSISTENCE_KEY,
			&network_graph.encode())
	}

	fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
		self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
			SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
			SCORER_PERSISTENCE_KEY,
			&scorer.encode())
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
	// TODO: We really need a way for the persister to inform the user that it's time to crash/shut
	// down once these start returning failure.
	// Then we should return InProgress rather than UnrecoverableError, implying we should probably
	// just shut down the node since we're not retrying persistence!

	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
		match self.write(
			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
			&key, &monitor.encode())
		{
			Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
			Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError,
		}
	}

	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
		match self.write(
			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
			&key, &monitor.encode())
		{
			Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
			Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError,
		}
	}
}

/// Read previously persisted [`ChannelMonitor`]s from the store.
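///
/// A usage sketch (assumed bindings; `keys_manager` serves as both entropy source and
/// signer provider): load every monitor written by the default [`Persist`] impl, e.g. to
/// hand them to a `ChainMonitor` on startup.
///
/// ```ignore
/// let monitors = read_channel_monitors(&kv_store, &keys_manager, &keys_manager)?;
/// ```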
pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
	kv_store: K, entropy_source: ES, signer_provider: SP,
) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
where
	K::Target: KVStore,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	let mut res = Vec::new();

	for stored_key in kv_store.list(
		CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)?
	{
		if stored_key.len() < 66 {
			return Err(io::Error::new(
				io::ErrorKind::InvalidData,
				"Stored key has invalid length"));
		}

		let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
			io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
		})?;

		let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
			io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
		})?;

		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
			&mut io::Cursor::new(
				kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
			(&*entropy_source, &*signer_provider),
		) {
			Ok((block_hash, channel_monitor)) => {
				if channel_monitor.get_funding_txo().0.txid != txid
					|| channel_monitor.get_funding_txo().0.index != index
				{
					return Err(io::Error::new(
						io::ErrorKind::InvalidData,
						"ChannelMonitor was stored under the wrong key",
					));
				}
				res.push((block_hash, channel_monitor));
			}
			Err(_) => {
				return Err(io::Error::new(
					io::ErrorKind::InvalidData,
					"Failed to read ChannelMonitor"
				))
			}
		}
	}
	Ok(res)
}

/// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
/// [`ChannelMonitorUpdate`]s.
///
/// # Overview
///
/// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
/// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
/// deleting) and complexity. This is because it writes channel monitor differential updates,
/// whereas the other (default) implementation rewrites the entire monitor on each update. For
/// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
/// of megabytes (or more). Updates can be as small as a few hundred bytes.
///
/// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
/// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
/// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
/// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
/// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
/// sentinel bytes.
///
/// # Storing monitors
///
/// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
///
/// - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
/// - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
///
/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`],
/// using the familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
///
/// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
///
/// - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`]
/// - secondary namespace: [the monitor's encoded outpoint name]
///
/// Under that secondary namespace, each update is stored with a number string, like `21`, which
/// represents its `update_id` value.
///
/// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
///
/// - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
/// - Index: `1`
///
/// Full channel monitors would be stored at a single key:
///
/// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
///
/// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
///
/// - `[CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1`
/// - `[CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2`
/// - `[CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3`
///
/// # Reading channel state from storage
///
/// Channel state can be reconstructed by calling
/// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
/// list channel monitors themselves and load channels individually using
/// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
///
/// ## EXTREMELY IMPORTANT
///
/// It is extremely important that your [`KVStore::read`] implementation uses the
/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
/// that circumstance (not when there is really a permissions error, for example). This is because
/// neither channel monitor reading function lists updates. Instead, each reads the monitor and,
/// using its stored `update_id`, synthesizes update storage keys and tries them in sequence until
/// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
///
/// # Pruning stale channel updates
///
/// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`.
/// Monitor updates in the range between the latest `update_id` and `update_id - maximum_pending_updates`
/// are deleted.
/// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
/// will complete. However, stale updates are not a problem for data integrity, since only updates
/// with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
///
/// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
/// would like to get rid of them, consider using the
/// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
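///
/// # Example
///
/// A hedged end-to-end sketch; `kv_store`, `logger`, `keys_manager`, `broadcaster`, and
/// `fee_estimator` are assumed bindings from your node setup, and `100` is an arbitrary
/// choice of `maximum_pending_updates`:
///
/// ```ignore
/// let persister = MonitorUpdatingPersister::new(
///     &kv_store, &logger, 100, &keys_manager, &keys_manager);
/// // On startup, reconstruct all channel state from the store:
/// let monitors = persister
///     .read_all_channel_monitors_with_updates(&broadcaster, &fee_estimator)?;
/// ```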
pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
where
	K::Target: KVStore,
	L::Target: Logger,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	kv_store: K,
	logger: L,
	maximum_pending_updates: u64,
	entropy_source: ES,
	signer_provider: SP,
}

impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
	MonitorUpdatingPersister<K, L, ES, SP>
where
	K::Target: KVStore,
	L::Target: Logger,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	/// Constructs a new [`MonitorUpdatingPersister`].
	///
	/// The `maximum_pending_updates` parameter controls how many updates may be stored before a
	/// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
	/// consolidation will frequently occur with fewer updates than what you set here; this number
	/// is merely the maximum that may be stored. When setting this value, consider that for higher
	/// values of `maximum_pending_updates`:
	///
	/// - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
	///   [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
	///   `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
	/// - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
	///   "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
	///   less frequent "waves."
	/// - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
	///   [`MonitorUpdatingPersister::cleanup_stale_updates`].
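	///
	/// A sketch of the trade-off (bindings assumed as in the struct-level example): with
	/// `maximum_pending_updates = 100`, roughly one full monitor write occurs per 100
	/// updates, and each full write triggers a wave of lazy deletes of the now-stale
	/// updates.
	///
	/// ```ignore
	/// let persister = MonitorUpdatingPersister::new(
	///     &kv_store, &logger, 100, &keys_manager, &keys_manager);
	/// ```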
	pub fn new(
		kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
		signer_provider: SP,
	) -> Self {
		MonitorUpdatingPersister {
			kv_store,
			logger,
			maximum_pending_updates,
			entropy_source,
			signer_provider,
		}
	}

	/// Reads all stored channel monitors, along with any stored updates for them.
	///
	/// It is extremely important that your [`KVStore::read`] implementation uses the
	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
	/// documentation for [`MonitorUpdatingPersister`].
	pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
		&self, broadcaster: &B, fee_estimator: &F,
	) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
	where
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
	{
		let monitor_list = self.kv_store.list(
			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
		)?;
		let mut res = Vec::with_capacity(monitor_list.len());
		for monitor_key in monitor_list {
			res.push(self.read_channel_monitor_with_updates(
				broadcaster,
				fee_estimator,
				monitor_key,
			)?)
		}
		Ok(res)
	}

	/// Read a single channel monitor, along with any stored updates for it.
	///
	/// It is extremely important that your [`KVStore::read`] implementation uses the
	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
	/// documentation for [`MonitorUpdatingPersister`].
	///
	/// For `monitor_key`, channel storage keys are the channel's transaction ID and index, or
	/// [`OutPoint`], with an underscore `_` between them. For example, given:
	///
	/// - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
	/// - Index: `1`
	///
	/// The correct `monitor_key` would be:
	/// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
	///
	/// Loading a large number of monitors will be faster if done in parallel. You can use this
	/// function to accomplish this. Take care to limit the number of parallel readers.
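	///
	/// A usage sketch (`persister`, `broadcaster`, and `fee_estimator` are assumed
	/// bindings):
	///
	/// ```ignore
	/// let key = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1".to_string();
	/// let (block_hash, monitor) =
	///     persister.read_channel_monitor_with_updates(&broadcaster, &fee_estimator, key)?;
	/// ```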
	pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
		&self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
	where
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
	{
		let monitor_name = MonitorName::new(monitor_key)?;
		let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
		let mut current_update_id = monitor.get_latest_update_id();
		loop {
			current_update_id = match current_update_id.checked_add(1) {
				Some(next_update_id) => next_update_id,
				None => break,
			};
			let update_name = UpdateName::from(current_update_id);
			let update = match self.read_monitor_update(&monitor_name, &update_name) {
				Ok(update) => update,
				Err(err) if err.kind() == io::ErrorKind::NotFound => {
					// We can't find any more updates, so we are done.
					break;
				}
				Err(err) => return Err(err),
			};

			monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
				.map_err(|e| {
					log_error!(
						self.logger,
						"Monitor update failed. monitor: {} update: {} reason: {:?}",
						monitor_name.as_str(),
						update_name.as_str(),
						e
					);
					io::Error::new(io::ErrorKind::Other, "Monitor update failed")
				})?;
		}
		Ok((block_hash, monitor))
	}

	/// Read a channel monitor.
	fn read_monitor(
		&self, monitor_name: &MonitorName,
	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
		let outpoint: OutPoint = monitor_name.try_into()?;
		let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
			monitor_name.as_str(),
		)?);
		// Discard the sentinel bytes if found.
		if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
			monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
		}
		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
			&mut monitor_cursor,
			(&*self.entropy_source, &*self.signer_provider),
		) {
			Ok((blockhash, channel_monitor)) => {
				if channel_monitor.get_funding_txo().0.txid != outpoint.txid
					|| channel_monitor.get_funding_txo().0.index != outpoint.index
				{
					log_error!(
						self.logger,
						"ChannelMonitor {} was stored under the wrong key!",
						monitor_name.as_str()
					);
					Err(io::Error::new(
						io::ErrorKind::InvalidData,
						"ChannelMonitor was stored under the wrong key",
					))
				} else {
					Ok((blockhash, channel_monitor))
				}
			}
			Err(e) => {
				log_error!(
					self.logger,
					"Failed to read ChannelMonitor {}, reason: {}",
					monitor_name.as_str(),
					e
				);
				Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
			}
		}
	}

	/// Read a channel monitor update.
	fn read_monitor_update(
		&self, monitor_name: &MonitorName, update_name: &UpdateName,
	) -> Result<ChannelMonitorUpdate, io::Error> {
		let update_bytes = self.kv_store.read(
			CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
			monitor_name.as_str(),
			update_name.as_str(),
		)?;
		ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
			log_error!(
				self.logger,
				"Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
				monitor_name.as_str(),
				update_name.as_str(),
				e
			);
			io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
		})
	}

	/// Cleans up stale updates for all monitors.
	///
	/// This function works by first listing all monitors, and then for each of them, listing all
	/// updates. The updates that have an `update_id` less than or equal to the stored monitor's
	/// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
	/// be passed to [`KVStore::remove`].
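	///
	/// A usage sketch (`persister` assumed as in the struct-level example), e.g. run once
	/// at startup after reading monitors:
	///
	/// ```ignore
	/// persister.cleanup_stale_updates(true)?;
	/// ```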
	pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
		let monitor_keys = self.kv_store.list(
			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
		)?;
		for monitor_key in monitor_keys {
			let monitor_name = MonitorName::new(monitor_key)?;
			let (_, current_monitor) = self.read_monitor(&monitor_name)?;
			let updates = self
				.kv_store
				.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?;
			for update in updates {
				let update_name = UpdateName::new(update)?;
				// If the update_id is lower than or equal to the stored monitor's, delete it.
				if update_name.0 <= current_monitor.get_latest_update_id() {
					self.kv_store.remove(
						CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
						monitor_name.as_str(),
						update_name.as_str(),
						lazy,
					)?;
				}
			}
		}
		Ok(())
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
	Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
where
	K::Target: KVStore,
	L::Target: Logger,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	/// Persists a new channel. This means writing the entire monitor to the
	/// parameterized [`KVStore`].
	fn persist_new_channel(
		&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
		_monitor_update_call_id: MonitorUpdateId,
	) -> chain::ChannelMonitorUpdateStatus {
		// Determine the proper key for this monitor
		let monitor_name = MonitorName::from(funding_txo);
		// Serialize and write the new monitor
		let mut monitor_bytes = Vec::with_capacity(
			MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
		);
		monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
		monitor.write(&mut monitor_bytes).unwrap();
		match self.kv_store.write(
			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
			monitor_name.as_str(),
			&monitor_bytes,
		) {
			Ok(_) => {
				chain::ChannelMonitorUpdateStatus::Completed
			}
			Err(e) => {
				log_error!(
					self.logger,
					"Failed to write ChannelMonitor {}/{}/{} reason: {}",
					CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
					CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
					monitor_name.as_str(),
					e
				);
				chain::ChannelMonitorUpdateStatus::UnrecoverableError
			}
		}
	}

	/// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
	///
	/// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
	///
	/// - No full monitor is found in [`KVStore`]
	/// - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
	/// - LDK commands re-persisting the entire monitor through this function, specifically when
	///   `update` is `None`.
	/// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
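	///
	/// A worked sketch of the consolidation rule used below (not part of the API):
	///
	/// ```ignore
	/// let maximum_pending_updates = 3;
	/// // update_ids 1 and 2 are written as differential updates...
	/// assert_ne!(1 % maximum_pending_updates, 0);
	/// assert_ne!(2 % maximum_pending_updates, 0);
	/// // ...while update_id 3 triggers a full monitor write (and a cleanup wave).
	/// assert_eq!(3 % maximum_pending_updates, 0);
	/// ```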
	fn update_persisted_channel(
		&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
		monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
	) -> chain::ChannelMonitorUpdateStatus {
		// IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
		// ChannelMonitorUpdate's update_id.
		if let Some(update) = update {
			if update.update_id != CLOSED_CHANNEL_UPDATE_ID
				&& update.update_id % self.maximum_pending_updates != 0
			{
				let monitor_name = MonitorName::from(funding_txo);
				let update_name = UpdateName::from(update.update_id);
				match self.kv_store.write(
					CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
					monitor_name.as_str(),
					update_name.as_str(),
					&update.encode(),
				) {
					Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
					Err(e) => {
						log_error!(
							self.logger,
							"Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
							CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
							monitor_name.as_str(),
							update_name.as_str(),
							e
						);
						chain::ChannelMonitorUpdateStatus::UnrecoverableError
					}
				}
			} else {
				let monitor_name = MonitorName::from(funding_txo);
				// In case of channel-close monitor update, we need to read old monitor before persisting
				// the new one in order to determine the cleanup range.
				let maybe_old_monitor = match monitor.get_latest_update_id() {
					CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
					_ => None,
				};

				// We could write this update, but it meets criteria of our design that calls for a full monitor write.
				let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);

				if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
					let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
						// If there is an error while reading old monitor, we skip clean up.
						maybe_old_monitor.map(|(_, ref old_monitor)| {
							let start = old_monitor.get_latest_update_id();
							// We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
							let end = cmp::min(
								start.saturating_add(self.maximum_pending_updates),
								CLOSED_CHANNEL_UPDATE_ID - 1,
							);
							(start, end)
						})
					} else {
						let end = monitor.get_latest_update_id();
						let start = end.saturating_sub(self.maximum_pending_updates);
						Some((start, end))
					};

					if let Some((start, end)) = cleanup_range {
						self.cleanup_in_range(monitor_name, start, end);
					}
				}

				monitor_update_status
			}
		} else {
			// There is no update given, so we must persist a new monitor.
			self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
		}
	}
}

impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
where
	ES::Target: EntropySource + Sized,
	K::Target: KVStore,
	L::Target: Logger,
	SP::Target: SignerProvider + Sized,
{
	// Cleans up monitor updates for given monitor in range `start..=end`.
	fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
		for update_id in start..=end {
			let update_name = UpdateName::from(update_id);
			if let Err(e) = self.kv_store.remove(
				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
				monitor_name.as_str(),
				update_name.as_str(),
				true,
			) {
				log_error!(
					self.logger,
					"Failed to clean up channel monitor updates for monitor {}, reason: {}",
					monitor_name.as_str(),
					e
				);
			};
		}
	}
}

/// A struct representing a name for a monitor.
#[derive(Debug)]
struct MonitorName(String);

impl MonitorName {
	/// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
	/// be formed from the given `name`.
	pub fn new(name: String) -> Result<Self, io::Error> {
		MonitorName::do_try_into_outpoint(&name)?;
		Ok(Self(name))
	}

	/// Convert this monitor name to a str.
	pub fn as_str(&self) -> &str {
		&self.0
	}

	/// Attempt to form a valid [`OutPoint`] from a given name string.
	fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
		let mut parts = name.splitn(2, '_');
		let txid = if let Some(part) = parts.next() {
			Txid::from_str(part).map_err(|_| {
				io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
			})?
		} else {
			return Err(io::Error::new(
				io::ErrorKind::InvalidData,
				"Stored monitor key is not a splittable string",
			));
		};
		let index = if let Some(part) = parts.next() {
			part.parse().map_err(|_| {
				io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
			})?
		} else {
			return Err(io::Error::new(
				io::ErrorKind::InvalidData,
				"No tx index value found after underscore in stored key",
			));
		};
		Ok(OutPoint { txid, index })
	}
}

impl TryFrom<&MonitorName> for OutPoint {
	type Error = io::Error;

	fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
		MonitorName::do_try_into_outpoint(&value.0)
	}
}

impl From<OutPoint> for MonitorName {
	fn from(value: OutPoint) -> Self {
		MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
	}
}

/// A struct representing a name for an update.
#[derive(Debug)]
struct UpdateName(u64, String);

impl UpdateName {
	/// Constructs an [`UpdateName`], after verifying that an update sequence ID
	/// can be derived from the given `name`.
	pub fn new(name: String) -> Result<Self, io::Error> {
		match name.parse::<u64>() {
			Ok(u) => Ok(u.into()),
			Err(_) => {
				Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
			}
		}
	}

	/// Convert this monitor update name to a &str
	pub fn as_str(&self) -> &str {
		&self.1
	}
}

impl From<u64> for UpdateName {
	fn from(value: u64) -> Self {
		Self(value, value.to_string())
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use crate::chain::ChannelMonitorUpdateStatus;
	use crate::events::{ClosureReason, MessageSendEventsProvider};
	use crate::ln::functional_test_utils::*;
	use crate::util::test_utils::{self, TestLogger, TestStore};
	use crate::{check_added_monitors, check_closed_broadcast};
	use crate::sync::Arc;
	use crate::util::test_channel_signer::TestChannelSigner;

	const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;

	#[test]
	fn converts_u64_to_update_name() {
		assert_eq!(UpdateName::from(0).as_str(), "0");
		assert_eq!(UpdateName::from(21).as_str(), "21");
		assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
	}

	#[test]
	fn bad_update_name_fails() {
		assert!(UpdateName::new("deadbeef".to_string()).is_err());
		assert!(UpdateName::new("-1".to_string()).is_err());
	}

	#[test]
	fn monitor_from_outpoint_works() {
		let monitor_name1 = MonitorName::from(OutPoint {
			txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
			index: 1,
		});
		assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");

		let monitor_name2 = MonitorName::from(OutPoint {
			txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
			index: u16::MAX,
		});
		assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
	}

	#[test]
	fn bad_monitor_string_fails() {
		assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
		assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
		assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
	}

	// Exercise the `MonitorUpdatingPersister` with real channels and payments.
	#[test]
	fn persister_with_real_monitors() {
		// This value is used later to limit how many iterations we perform.
		let persister_0_max_pending_updates = 7;
		// Intentionally set this to a smaller value to test a different alignment.
		let persister_1_max_pending_updates = 3;
		let chanmon_cfgs = create_chanmon_cfgs(4);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: persister_0_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: persister_1_max_pending_updates,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
		let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
			&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_0.len(), 0);
		let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
			&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_1.len(), 0);

		// Helper to make sure the channel is on the expected update ID.
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
					&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
				// check that we stored only one monitor
				assert_eq!(persisted_chan_data_0.len(), 1);
				for (_, mon) in persisted_chan_data_0.iter() {
					// check that when we read it, we got the right update id
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);

					// if the CM is at consolidation threshold, ensure no updates are stored.
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
							|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
						assert_eq!(
							persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 0"
						);
					}
				}
				persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
					&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
				assert_eq!(persisted_chan_data_1.len(), 1);
				for (_, mon) in persisted_chan_data_1.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					// if the CM is at consolidation threshold, ensure no updates are stored.
					if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
							|| mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
						assert_eq!(
							persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 1"
						);
					}
				}
			};
		}

		// Create some initial channel and check that a channel was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
		check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);

		// Send a few more payments to try all the alignments of max pending updates with
		// updates for a payment sent and received.
		let mut sender = 0;
		for i in 3..=persister_0_max_pending_updates * 2 {
			let receiver;
			if sender == 0 {
				sender = 1;
				receiver = 0;
			} else {
				sender = 0;
				receiver = 1;
			}
			send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
			check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
		}

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();

		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(node_txn.len(), 1);

		connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));

		check_closed_broadcast!(nodes[1], true);
		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);

		// Make sure the expected number of stale updates is present.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		// The channel should have 0 updates, as it wrote a full monitor and consolidated.
		assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
		assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
	}

	// Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to persist a
	// monitor or update with it results in the persister returning an UnrecoverableError status.
	#[test]
	fn unrecoverable_error_on_write_failure() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
		{
			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
			let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
			let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
			let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

			let ro_persister = MonitorUpdatingPersister {
				kv_store: &TestStore::new(true),
				logger: &TestLogger::new(),
				maximum_pending_updates: 11,
				entropy_source: node_cfgs[0].keys_manager,
				signer_provider: node_cfgs[0].keys_manager,
			};
			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// correct result
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting new channel when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// correct result
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting new channel when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			added_monitors.clear();
		}
		nodes[1].node.get_and_clear_pending_msg_events();
	}

	// Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
	#[test]
	fn clean_stale_updates_works() {
		let test_max_pending_updates = 7;
		let chanmon_cfgs = create_chanmon_cfgs(3);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data.len(), 0);

		// Create some initial channel
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);

		// Send a few payments to advance the updates a bit
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);

		// Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
			.is_err());

		// Force close.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		// Write an update near u64::MAX
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
			.is_err());
	}

	fn persist_fn<P: Deref, ChannelSigner: WriteableEcdsaChannelSigner>(_persist: P) -> bool where P::Target: Persist<ChannelSigner> {
		true
	}

	#[test]
	fn kvstore_trait_object_usage() {
		let store: Arc<dyn KVStore + Send + Sync> = Arc::new(TestStore::new(false));
		assert!(persist_fn::<_, TestChannelSigner>(store.clone()));
	}
}