// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! This module contains a simple key-value store trait [`KVStore`] that
//! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
//! and [`ChannelMonitor`] all in one place.
use core::cmp;
use core::convert::{TryFrom, TryInto};
use core::ops::Deref;

use bitcoin::hashes::hex::{FromHex, ToHex};
use bitcoin::{BlockHash, Txid};

use crate::{io, log_error};
use crate::alloc::string::ToString;
use crate::prelude::*;

use crate::chain;
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
use crate::sign::{EntropySource, NodeSigner, WriteableEcdsaChannelSigner, SignerProvider};
use crate::chain::transaction::OutPoint;
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
use crate::ln::channelmanager::ChannelManager;
use crate::routing::router::Router;
use crate::routing::gossip::NetworkGraph;
use crate::routing::scoring::WriteableScore;
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, Writeable};
/// The alphabet of characters allowed for namespaces and keys.
pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";

/// The maximum number of characters namespaces and keys may have.
pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;

/// The namespace under which the [`ChannelManager`] will be persisted.
pub const CHANNEL_MANAGER_PERSISTENCE_NAMESPACE: &str = "";
/// The sub-namespace under which the [`ChannelManager`] will be persisted.
pub const CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE: &str = "";
/// The key under which the [`ChannelManager`] will be persisted.
pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";

/// The namespace under which [`ChannelMonitor`]s will be persisted.
pub const CHANNEL_MONITOR_PERSISTENCE_NAMESPACE: &str = "monitors";
/// The sub-namespace under which [`ChannelMonitor`]s will be persisted.
pub const CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE: &str = "";
/// The namespace under which [`ChannelMonitorUpdate`]s will be persisted.
pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE: &str = "monitor_updates";

/// The namespace under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_NAMESPACE: &str = "";
/// The sub-namespace under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE: &str = "";
/// The key under which the [`NetworkGraph`] will be persisted.
pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";

/// The namespace under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_NAMESPACE: &str = "";
/// The sub-namespace under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_SUB_NAMESPACE: &str = "";
/// The key under which the [`WriteableScore`] will be persisted.
pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
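
// For illustration, with `/` delimiting primary-namespace/sub-namespace/key and empty
// namespaces elided, the defaults above place each object at the following locations
// (this comment is descriptive only; the constants above are authoritative):
//
// - `manager` (the `ChannelManager`, at the store root)
// - `monitors/<funding_txid>_<index>` (each `ChannelMonitor`)
// - `network_graph`
// - `scorer`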

/// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
///
/// This serves to prevent someone from accidentally loading such monitors (which may need
/// updates applied to be current) with another implementation.
pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];

/// Provides an interface that allows storage and retrieval of persisted values that are associated
/// with given keys.
///
/// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s
/// and `sub_namespace`s. Implementations of this trait are free to handle them in different ways,
/// as long as per-namespace key uniqueness is asserted.
///
/// Keys and namespaces are required to be valid ASCII strings in the range of
/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
/// primary namespaces and sub-namespaces (`""`) are assumed to be valid; however, if
/// `primary_namespace` is empty, `sub_namespace` is required to be empty, too. This means that
/// concerns should always be separated by primary namespace first, before sub-namespaces are used.
/// While the number of primary namespaces will be relatively small and is determined at compile
/// time, there may be many sub-namespaces per primary namespace. Note that per-namespace
/// uniqueness needs to also hold for keys *and* namespaces in any given namespace, i.e., conflicts
/// between keys and equally named primary-namespaces/sub-namespaces must be avoided.
///
/// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
/// interface can use a concatenation of `[{primary_namespace}/[{sub_namespace}/]]{key}` to recover
/// a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
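///
/// # Example
///
/// A minimal sketch of an in-memory implementation, for illustration only. It assumes a `std`
/// environment and elides the validation of namespaces and keys against
/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and [`KVSTORE_NAMESPACE_KEY_MAX_LEN`] that a production
/// store would need:
///
/// ```ignore
/// use std::collections::HashMap;
/// use std::sync::Mutex;
///
/// #[derive(Default)]
/// struct InMemoryStore {
/// 	// Maps (primary_namespace, sub_namespace) to a key -> value map.
/// 	entries: Mutex<HashMap<(String, String), HashMap<String, Vec<u8>>>>,
/// }
///
/// impl KVStore for InMemoryStore {
/// 	fn read(&self, primary_namespace: &str, sub_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error> {
/// 		self.entries.lock().unwrap()
/// 			.get(&(primary_namespace.to_string(), sub_namespace.to_string()))
/// 			.and_then(|ns| ns.get(key).cloned())
/// 			// `NotFound` is load-bearing: callers probe for keys that may not exist.
/// 			.ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
/// 	}
/// 	fn write(&self, primary_namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error> {
/// 		self.entries.lock().unwrap()
/// 			.entry((primary_namespace.to_string(), sub_namespace.to_string()))
/// 			.or_default()
/// 			.insert(key.to_string(), buf.to_vec());
/// 		Ok(())
/// 	}
/// 	fn remove(&self, primary_namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> Result<(), io::Error> {
/// 		// Removal is idempotent: succeed whether or not the key was present.
/// 		if let Some(ns) = self.entries.lock().unwrap()
/// 			.get_mut(&(primary_namespace.to_string(), sub_namespace.to_string()))
/// 		{
/// 			ns.remove(key);
/// 		}
/// 		Ok(())
/// 	}
/// 	fn list(&self, primary_namespace: &str, sub_namespace: &str) -> Result<Vec<String>, io::Error> {
/// 		Ok(self.entries.lock().unwrap()
/// 			.get(&(primary_namespace.to_string(), sub_namespace.to_string()))
/// 			.map(|ns| ns.keys().cloned().collect())
/// 			.unwrap_or_default())
/// 	}
/// }
/// ```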
pub trait KVStore {
	/// Returns the data stored for the given `primary_namespace`, `sub_namespace`, and `key`.
	///
	/// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
	/// `primary_namespace` and `sub_namespace`.
	///
	/// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
	fn read(&self, primary_namespace: &str, sub_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
	/// Persists the given data under the given `key`.
	///
	/// Will create the given `primary_namespace` and `sub_namespace` if not already present in the
	/// store.
	fn write(&self, primary_namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
	/// Removes any data that had previously been persisted under the given `key`.
	///
	/// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
	/// remove the given `key` at some point in time after the method returns, e.g., as part of an
	/// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
	/// [`KVStore::list`] might include the removed key until the changes are actually persisted.
	///
	/// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
	/// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
	/// potentially get lost on crash after the method returns. Therefore, this flag should only be
	/// set for `remove` operations that can be safely replayed at a later time.
	///
	/// Returns successfully if no data will be stored for the given `primary_namespace`,
	/// `sub_namespace`, and `key`, independently of whether it was present before its invocation
	/// or not.
	fn remove(&self, primary_namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
	/// Returns a list of keys that are stored under the given `sub_namespace` in
	/// `primary_namespace`.
	///
	/// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
	/// returned keys. Returns an empty list if `primary_namespace` or `sub_namespace` is unknown.
	fn list(&self, primary_namespace: &str, sub_namespace: &str) -> Result<Vec<String>, io::Error>;
}
/// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
	where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
{
	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
	fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error>;

	/// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
	fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;

	/// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
	fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
}

impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
	where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
		T::Target: 'static + BroadcasterInterface,
		ES::Target: 'static + EntropySource,
		NS::Target: 'static + NodeSigner,
		SP::Target: 'static + SignerProvider,
		F::Target: 'static + FeeEstimator,
		R::Target: 'static + Router,
		L::Target: 'static + Logger,
{
	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
	fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
		self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
			CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
			CHANNEL_MANAGER_PERSISTENCE_KEY,
			&channel_manager.encode())
	}

	/// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
	fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
		self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
			NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
			NETWORK_GRAPH_PERSISTENCE_KEY,
			&network_graph.encode())
	}

	/// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
	fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
		self.write(SCORER_PERSISTENCE_NAMESPACE,
			SCORER_PERSISTENCE_SUB_NAMESPACE,
			SCORER_PERSISTENCE_KEY,
			&scorer.encode())
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
	// TODO: We really need a way for the persister to inform the user that it's time to crash/shut
	// down once these start returning failure.
	// Then we should return InProgress rather than UnrecoverableError, implying we should probably
	// just shut down the node since we're not retrying persistence!

	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
		let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
		match self.write(
			CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
			&key, &monitor.encode())
		{
			Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
			Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
		}
	}

	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
		let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
		match self.write(
			CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
			&key, &monitor.encode())
		{
			Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
			Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
		}
	}
}

/// Read previously persisted [`ChannelMonitor`]s from the store.
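///
/// A hedged usage sketch; `kv_store` and `keys_manager` are illustrative values supplied by
/// the caller (a `KeysManager` can serve as both the entropy source and the signer provider):
///
/// ```ignore
/// let monitors = read_channel_monitors(&kv_store, &keys_manager, &keys_manager)?;
/// for (block_hash, monitor) in monitors {
/// 	// Hand each monitor back to the chain monitor on startup.
/// }
/// ```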
pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
	kv_store: K, entropy_source: ES, signer_provider: SP,
) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
where
	K::Target: KVStore,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	let mut res = Vec::new();

	for stored_key in kv_store.list(
		CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
	{
		if stored_key.len() < 66 {
			return Err(io::Error::new(
				io::ErrorKind::InvalidData,
				"Stored key has invalid length"));
		}

		let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
			io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
		})?;

		let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
			io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
		})?;

		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
			&mut io::Cursor::new(
				kv_store.read(CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE, &stored_key)?),
			(&*entropy_source, &*signer_provider),
		) {
			Ok((block_hash, channel_monitor)) => {
				if channel_monitor.get_funding_txo().0.txid != txid
					|| channel_monitor.get_funding_txo().0.index != index
				{
					return Err(io::Error::new(
						io::ErrorKind::InvalidData,
						"ChannelMonitor was stored under the wrong key",
					));
				}
				res.push((block_hash, channel_monitor));
			}
			Err(_) => {
				return Err(io::Error::new(
					io::ErrorKind::InvalidData,
					"Failed to read ChannelMonitor"
				))
			}
		}
	}
	Ok(res)
}

/// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
/// [`ChannelMonitorUpdate`]s.
///
/// # Overview
///
/// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
/// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
/// deleting) and complexity. This is because it writes channel monitor differential updates,
/// whereas the other (default) implementation rewrites the entire monitor on each update. For
/// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
/// of megabytes (or more). Updates can be as small as a few hundred bytes.
///
/// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
/// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
/// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
/// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
/// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
/// sentinel bytes.
///
/// # Storing monitors
///
/// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
///
/// - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
/// - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
///
/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_NAMESPACE`], using the
/// familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
///
/// Each [`ChannelMonitorUpdate`] is stored in a dynamic sub-namespace, as follows:
///
/// - primary-namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE`]
/// - sub-namespace: [the monitor's encoded outpoint name]
///
/// Under that sub-namespace, each update is stored with a number string, like `21`, which
/// represents its `update_id` value.
///
/// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
///
/// - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
/// - Index: `1`
///
/// Full channel monitors would be stored at a single key:
///
/// `[CHANNEL_MONITOR_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
///
/// Updates would be stored as follows (with `/` delimiting primary-namespace/sub-namespace/key):
///
/// ```text
/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
/// ```
///
/// # Reading channel state from storage
///
/// Channel state can be reconstructed by calling
/// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
/// list channel monitors themselves and load channels individually using
/// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
///
/// ## EXTREMELY IMPORTANT
///
/// It is extremely important that your [`KVStore::read`] implementation uses the
/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
/// that circumstance (not when there is really a permissions error, for example). This is because
/// neither channel monitor reading function lists updates. Instead, each function reads the
/// monitor and, using its stored `update_id`, synthesizes update storage keys, trying them in
/// sequence until one is not found. All _other_ errors will be bubbled up in the function's
/// [`Result`].
///
/// # Pruning stale channel updates
///
/// Stale updates are pruned when a full monitor is written. The old monitor is first read, and if
/// that succeeds, updates in the range between the old and new monitors are deleted. The `lazy`
/// flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
/// will complete. However, stale updates are not a problem for data integrity, since only updates
/// with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
///
/// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
/// would like to get rid of them, consider using the
/// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
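///
/// # Example
///
/// A hedged startup sketch, assuming an already-constructed `persister` along with illustrative
/// `broadcaster` and `fee_estimator` values supplied by the caller (see
/// [`MonitorUpdatingPersister::new`] for construction):
///
/// ```ignore
/// // Read back every monitor persisted so far, with all pending updates applied.
/// let monitors = persister.read_all_channel_monitors_with_updates(
/// 	&broadcaster, &fee_estimator)?;
/// // Optionally prune updates left over from before a crash (e.g., pending lazy deletes).
/// persister.cleanup_stale_updates(true)?;
/// ```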
pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref>
where
	K::Target: KVStore,
	L::Target: Logger,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	kv_store: K,
	logger: L,
	maximum_pending_updates: u64,
	entropy_source: ES,
	signer_provider: SP,
}

impl<K: Deref, L: Deref, ES: Deref, SP: Deref>
	MonitorUpdatingPersister<K, L, ES, SP>
where
	K::Target: KVStore,
	L::Target: Logger,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	/// Constructs a new [`MonitorUpdatingPersister`].
	///
	/// The `maximum_pending_updates` parameter controls how many updates may be stored before a
	/// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
	/// consolidation will frequently occur with fewer updates than what you set here; this number
	/// is merely the maximum that may be stored. When setting this value, consider that for higher
	/// values of `maximum_pending_updates`:
	///
	/// - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
	///   [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
	///   `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
	/// - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
	///   "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means
	///   bigger, less frequent "waves."
	/// - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
	///   [`MonitorUpdatingPersister::cleanup_stale_updates`].
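	///
	/// For instance, a hedged construction sketch; the `kv_store`, `logger`, and `keys_manager`
	/// values are illustrative, supplied by the caller, and `100` is an arbitrary consolidation
	/// threshold:
	///
	/// ```ignore
	/// let persister = MonitorUpdatingPersister::new(
	/// 	&kv_store,
	/// 	&logger,
	/// 	100, // maximum_pending_updates
	/// 	&keys_manager, // entropy_source
	/// 	&keys_manager, // signer_provider
	/// );
	/// ```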
	pub fn new(
		kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
		signer_provider: SP,
	) -> Self
	where
		ES::Target: EntropySource + Sized,
		SP::Target: SignerProvider + Sized,
	{
		MonitorUpdatingPersister {
			kv_store,
			logger,
			maximum_pending_updates,
			entropy_source,
			signer_provider,
		}
	}

	/// Reads all stored channel monitors, along with any stored updates for them.
	///
	/// It is extremely important that your [`KVStore::read`] implementation uses the
	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
	/// documentation for [`MonitorUpdatingPersister`].
	pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref + Clone>(
		&self, broadcaster: B, fee_estimator: F,
	) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
	where
		ES::Target: EntropySource + Sized,
		SP::Target: SignerProvider + Sized,
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
	{
		let monitor_list = self.kv_store.list(
			CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
		)?;
		let mut res = Vec::with_capacity(monitor_list.len());
		for monitor_key in monitor_list {
			res.push(self.read_channel_monitor_with_updates(
				&broadcaster,
				fee_estimator.clone(),
				monitor_key,
			)?)
		}
		Ok(res)
	}

	/// Read a single channel monitor, along with any stored updates for it.
	///
	/// It is extremely important that your [`KVStore::read`] implementation uses the
	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
	/// documentation for [`MonitorUpdatingPersister`].
	///
	/// For `monitor_key`, channel storage keys are the channel's transaction ID and index, or
	/// [`OutPoint`], with an underscore `_` between them. For example, given:
	///
	/// - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
	/// - Index: `1`
	///
	/// The correct `monitor_key` would be:
	/// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
	///
	/// Loading a large number of monitors will be faster if done in parallel; you can use this
	/// function to accomplish that. Take care to limit the number of parallel readers.
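	///
	/// A hedged parallel-loading sketch using `std::thread`; the `persister`, `broadcaster`,
	/// and `fee_estimator` values are illustrative `Arc`-wrapped dependencies, and
	/// `monitor_keys` a previously listed `Vec<String>`:
	///
	/// ```ignore
	/// let handles: Vec<_> = monitor_keys.into_iter().map(|key| {
	/// 	let (persister, broadcaster, fee_estimator) =
	/// 		(Arc::clone(&persister), Arc::clone(&broadcaster), Arc::clone(&fee_estimator));
	/// 	std::thread::spawn(move || {
	/// 		persister.read_channel_monitor_with_updates(&broadcaster, fee_estimator, key)
	/// 	})
	/// }).collect();
	/// for handle in handles {
	/// 	let (block_hash, monitor) = handle.join().unwrap()?;
	/// 	// Hand each monitor back to the chain monitor.
	/// }
	/// ```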
	pub fn read_channel_monitor_with_updates<B: Deref, F: Deref + Clone>(
		&self, broadcaster: &B, fee_estimator: F, monitor_key: String,
	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
	where
		ES::Target: EntropySource + Sized,
		SP::Target: SignerProvider + Sized,
		B::Target: BroadcasterInterface,
		F::Target: FeeEstimator,
	{
		let monitor_name = MonitorName::new(monitor_key)?;
		let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
		let mut current_update_id = monitor.get_latest_update_id();
		loop {
			current_update_id = match current_update_id.checked_add(1) {
				Some(next_update_id) => next_update_id,
				None => break,
			};
			let update_name = UpdateName::from(current_update_id);
			let update = match self.read_monitor_update(&monitor_name, &update_name) {
				Ok(update) => update,
				Err(err) if err.kind() == io::ErrorKind::NotFound => {
					// We can't find any more updates, so we are done.
					break;
				}
				Err(err) => return Err(err),
			};

			monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
				.map_err(|e| {
					log_error!(
						self.logger,
						"Monitor update failed. monitor: {} update: {} reason: {:?}",
						monitor_name.as_str(),
						update_name.as_str(),
						e
					);
					io::Error::new(io::ErrorKind::Other, "Monitor update failed")
				})?;
		}
		Ok((block_hash, monitor))
	}

	/// Read a channel monitor.
	fn read_monitor(
		&self, monitor_name: &MonitorName,
	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error> {
		let outpoint: OutPoint = monitor_name.try_into()?;
		let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
			CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
			monitor_name.as_str(),
		)?);
		// Discard the sentinel bytes if found.
		if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
			monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
		}
		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
			&mut monitor_cursor,
			(&*self.entropy_source, &*self.signer_provider),
		) {
			Ok((blockhash, channel_monitor)) => {
				if channel_monitor.get_funding_txo().0.txid != outpoint.txid
					|| channel_monitor.get_funding_txo().0.index != outpoint.index
				{
					log_error!(
						self.logger,
						"ChannelMonitor {} was stored under the wrong key!",
						monitor_name.as_str()
					);
					Err(io::Error::new(
						io::ErrorKind::InvalidData,
						"ChannelMonitor was stored under the wrong key",
					))
				} else {
					Ok((blockhash, channel_monitor))
				}
			}
			Err(e) => {
				log_error!(
					self.logger,
					"Failed to read ChannelMonitor {}, reason: {}",
					monitor_name.as_str(),
					e
				);
				Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
			}
		}
	}

	/// Read a channel monitor update.
	fn read_monitor_update(
		&self, monitor_name: &MonitorName, update_name: &UpdateName,
	) -> Result<ChannelMonitorUpdate, io::Error> {
		let update_bytes = self.kv_store.read(
			CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
			monitor_name.as_str(),
			update_name.as_str(),
		)?;
		ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
			log_error!(
				self.logger,
				"Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
				CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
				monitor_name.as_str(),
				update_name.as_str(),
				e
			);
			io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
		})
	}

	/// Cleans up stale updates for all monitors.
	///
	/// This function works by first listing all monitors, and then for each of them, listing all
	/// updates. Updates that have an `update_id` less than or equal to the stored monitor's are
	/// deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
	/// be passed to [`KVStore::remove`].
	pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
		let monitor_keys = self.kv_store.list(
			CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
		)?;
		for monitor_key in monitor_keys {
			let monitor_name = MonitorName::new(monitor_key)?;
			let (_, current_monitor) = self.read_monitor(&monitor_name)?;
			let updates = self
				.kv_store
				.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str())?;
			for update in updates {
				let update_name = UpdateName::new(update)?;
				// If the update_id is lower than or equal to the stored monitor's, delete it.
				if update_name.0 <= current_monitor.get_latest_update_id() {
					self.kv_store.remove(
						CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
						monitor_name.as_str(),
						update_name.as_str(),
						lazy,
					)?;
				}
			}
		}
		Ok(())
	}
}

impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
	Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
where
	K::Target: KVStore,
	L::Target: Logger,
	ES::Target: EntropySource + Sized,
	SP::Target: SignerProvider + Sized,
{
	/// Persists a new channel. This means writing the entire monitor to the
	/// parametrized [`KVStore`].
	fn persist_new_channel(
		&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
		_monitor_update_call_id: MonitorUpdateId,
	) -> chain::ChannelMonitorUpdateStatus {
		// Determine the proper key for this monitor
		let monitor_name = MonitorName::from(funding_txo);
		let maybe_old_monitor = self.read_monitor(&monitor_name);
		match maybe_old_monitor {
			Ok((_, ref old_monitor)) => {
				// Check that this key isn't already storing a monitor with a higher update_id
				// (collision)
				if old_monitor.get_latest_update_id() > monitor.get_latest_update_id() {
					log_error!(
						self.logger,
						"Tried to write a monitor at the same outpoint {} with a higher update_id!",
						monitor_name.as_str()
					);
					return chain::ChannelMonitorUpdateStatus::UnrecoverableError;
				}
			}
			// This means the channel monitor is new.
			Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
			_ => return chain::ChannelMonitorUpdateStatus::UnrecoverableError,
		}
		// Serialize and write the new monitor
		let mut monitor_bytes = Vec::with_capacity(
			MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
		);
		monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
		monitor.write(&mut monitor_bytes).unwrap();
		match self.kv_store.write(
			CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
			CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
			monitor_name.as_str(),
			&monitor_bytes,
		) {
			Ok(_) => {
				// Assess cleanup. Typically, we'll clean up only between the last two known full
				// monitors.
				if let Ok((_, old_monitor)) = maybe_old_monitor {
					let start = old_monitor.get_latest_update_id();
					let end = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
						// We don't want to clean the rest of u64, so just do possible pending
						// updates. Note that we never write updates at
						// `CLOSED_CHANNEL_UPDATE_ID`.
						cmp::min(
							start.saturating_add(self.maximum_pending_updates),
							CLOSED_CHANNEL_UPDATE_ID - 1,
						)
					} else {
						monitor.get_latest_update_id().saturating_sub(1)
					};
					// We should bother cleaning up only if there's at least one update
					// expected.
					for update_id in start..=end {
						let update_name = UpdateName::from(update_id);
						#[cfg(debug_assertions)]
						{
							if let Ok(update) =
								self.read_monitor_update(&monitor_name, &update_name)
							{
								// Assert that we are reading what we think we are.
								debug_assert_eq!(update.update_id, update_name.0);
							} else if update_id != start && monitor.get_latest_update_id() != CLOSED_CHANNEL_UPDATE_ID
							{
								// We're deleting something we should know doesn't exist.
								panic!(
									"failed to read monitor update {}",
									update_name.as_str()
								);
							}
							// On closed channels, we will unavoidably try to read
							// non-existent updates since we have to guess at the range of
							// stale updates, so do nothing.
						}
						if let Err(e) = self.kv_store.remove(
							CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
							monitor_name.as_str(),
							update_name.as_str(),
							true,
						) {
							log_error!(
								self.logger,
								"error cleaning up channel monitor updates for monitor {}, reason: {}",
								monitor_name.as_str(),
								e
							);
						};
					}
				};
				chain::ChannelMonitorUpdateStatus::Completed
			}
			Err(e) => {
				log_error!(
					self.logger,
					"error writing channel monitor {}/{}/{} reason: {}",
					CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
					CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
					monitor_name.as_str(),
					e
				);
				chain::ChannelMonitorUpdateStatus::UnrecoverableError
			}
		}
	}

	/// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
	///
	/// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
	///
	/// - No full monitor is found in [`KVStore`]
	/// - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
	/// - LDK commands re-persisting the entire monitor through this function, specifically when
	///   `update` is `None`.
	/// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
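	///
	/// For example, with `maximum_pending_updates` of `3`, updates with `update_id` `1` and `2`
	/// are written as differential updates, while update `3` (since `3 % 3 == 0`) triggers a
	/// consolidating full monitor write instead.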
	fn update_persisted_channel(
		&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
		monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
	) -> chain::ChannelMonitorUpdateStatus {
		// IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
		// ChannelMonitorUpdate's update_id.
		if let Some(update) = update {
			if update.update_id != CLOSED_CHANNEL_UPDATE_ID
				&& update.update_id % self.maximum_pending_updates != 0
			{
				let monitor_name = MonitorName::from(funding_txo);
				let update_name = UpdateName::from(update.update_id);
				match self.kv_store.write(
					CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
					monitor_name.as_str(),
					update_name.as_str(),
					&update.encode(),
				) {
					Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
					Err(e) => {
						log_error!(
							self.logger,
							"error writing channel monitor update {}/{}/{} reason: {}",
							CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
							monitor_name.as_str(),
							update_name.as_str(),
							e
						);
						chain::ChannelMonitorUpdateStatus::UnrecoverableError
					}
				}
			} else {
				// We could write this update, but it meets criteria of our design that call for a full monitor write.
				self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
			}
		} else {
			// There is no update given, so we must persist a new monitor.
			self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
		}
	}
}

/// A struct representing a name for a monitor.
#[derive(Debug)]
struct MonitorName(String);

impl MonitorName {
	/// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
	/// be formed from the given `name`.
	pub fn new(name: String) -> Result<Self, io::Error> {
		MonitorName::do_try_into_outpoint(&name)?;
		Ok(Self(name))
	}

	/// Convert this monitor name to a str.
	pub fn as_str(&self) -> &str {
		&self.0
	}

	/// Attempt to form a valid [`OutPoint`] from a given name string.
	fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
		let mut parts = name.splitn(2, '_');
		let txid = if let Some(part) = parts.next() {
			Txid::from_hex(part).map_err(|_| {
				io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
			})?
		} else {
			return Err(io::Error::new(
				io::ErrorKind::InvalidData,
				"Stored monitor key is not a splittable string",
			));
		};
		let index = if let Some(part) = parts.next() {
			part.parse().map_err(|_| {
				io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
			})?
		} else {
			return Err(io::Error::new(
				io::ErrorKind::InvalidData,
				"No tx index value found after underscore in stored key",
			));
		};
		Ok(OutPoint { txid, index })
	}
}

impl TryFrom<&MonitorName> for OutPoint {
	type Error = io::Error;

	fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
		MonitorName::do_try_into_outpoint(&value.0)
	}
}

impl From<OutPoint> for MonitorName {
	fn from(value: OutPoint) -> Self {
		MonitorName(format!("{}_{}", value.txid.to_hex(), value.index))
	}
}

/// A struct representing a name for an update.
#[derive(Debug)]
struct UpdateName(u64, String);

impl UpdateName {
	/// Constructs an [`UpdateName`], after verifying that an update sequence ID
	/// can be derived from the given `name`.
	pub fn new(name: String) -> Result<Self, io::Error> {
		match name.parse::<u64>() {
			Ok(u) => Ok(u.into()),
			Err(_) => {
				Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
			}
		}
	}

	/// Convert this monitor update name to a &str.
	pub fn as_str(&self) -> &str {
		&self.1
	}
}

impl From<u64> for UpdateName {
	fn from(value: u64) -> Self {
		Self(value, value.to_string())
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use crate::chain::chainmonitor::Persist;
	use crate::chain::ChannelMonitorUpdateStatus;
	use crate::events::{ClosureReason, MessageSendEventsProvider};
	use crate::ln::functional_test_utils::*;
	use crate::util::test_utils::{self, TestLogger, TestStore};
	use crate::{check_added_monitors, check_closed_broadcast};

	const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
	#[test]
	fn converts_u64_to_update_name() {
		assert_eq!(UpdateName::from(0).as_str(), "0");
		assert_eq!(UpdateName::from(21).as_str(), "21");
		assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
	}

	#[test]
	fn bad_update_name_fails() {
		assert!(UpdateName::new("deadbeef".to_string()).is_err());
		assert!(UpdateName::new("-1".to_string()).is_err());
	}
	#[test]
	fn monitor_from_outpoint_works() {
		let monitor_name1 = MonitorName::from(OutPoint {
			txid: Txid::from_hex("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
			index: 1,
		});
		assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");

		let monitor_name2 = MonitorName::from(OutPoint {
			txid: Txid::from_hex("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
			index: u16::MAX,
		});
		assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
	}

	#[test]
	fn bad_monitor_string_fails() {
		assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()).is_err());
		assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()).is_err());
		assert!(MonitorName::new("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()).is_err());
	}
	// Exercise the `MonitorUpdatingPersister` with real channels and payments.
	#[test]
	fn persister_with_real_monitors() {
		// This value is used later to limit how many iterations we perform.
		let test_max_pending_updates = 7;
		let chanmon_cfgs = create_chanmon_cfgs(4);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			// Intentionally set this to a smaller value to test a different alignment.
			maximum_pending_updates: 3,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
		let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
			broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_0.len(), 0);
		let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
			broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data_1.len(), 0);

		// Helper to make sure the channel is on the expected update ID.
		macro_rules! check_persisted_data {
			($expected_update_id: expr) => {
				persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
					broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
				// check that we stored only one monitor
				assert_eq!(persisted_chan_data_0.len(), 1);
				for (_, mon) in persisted_chan_data_0.iter() {
					// check that when we read it, we got the right update id
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
					// if the CM is at the correct update id without updates, ensure no updates are stored
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					let (_, cm_0) = persister_0.read_monitor(&monitor_name).unwrap();
					if cm_0.get_latest_update_id() == $expected_update_id {
						assert_eq!(
							persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 0"
						);
					}
				}
				persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
					broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
				assert_eq!(persisted_chan_data_1.len(), 1);
				for (_, mon) in persisted_chan_data_1.iter() {
					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
					let (_, cm_1) = persister_1.read_monitor(&monitor_name).unwrap();
					if cm_1.get_latest_update_id() == $expected_update_id {
						assert_eq!(
							persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
								monitor_name.as_str()).unwrap().len(),
							0,
							"updates stored when they shouldn't be in persister 1"
						);
					}
				}
			};
		}

		// Create some initial channel and check that a channel was persisted.
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
		check_persisted_data!(0);

		// Send a few payments and make sure the monitors are updated to the latest.
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
		check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);

		// Send a few more payments to try all the alignments of max pending updates with
		// updates for a payment sent and received.
		let mut sender = 0;
		for i in 3..=test_max_pending_updates * 2 {
			let receiver;
			if sender == 0 {
				sender = 1;
				receiver = 0;
			} else {
				sender = 0;
				receiver = 1;
			}
			send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
			check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
		}

		// Force close because cooperative close doesn't result in any persisted
		// updates.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();

		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(node_txn.len(), 1);

		connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));

		check_closed_broadcast!(nodes[1], true);
		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
		check_added_monitors!(nodes[1], 1);

		// Make sure everything is persisted as expected after close.
		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);

		// Make sure the expected number of stale updates is present.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		// The channel should have 0 updates, as it wrote a full monitor and consolidated.
		assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
		assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
	}
	// Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to
	// persist a monitor or update with it results in the persister returning an
	// UnrecoverableError status.
	#[test]
	fn unrecoverable_error_on_write_failure() {
		// Set up a dummy channel and force close. This will produce a monitor
		// that we can then use to test persistence.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
		{
			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
			let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
			let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
			let test_txo = OutPoint { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

			let ro_persister = MonitorUpdatingPersister {
				kv_store: &TestStore::new(true),
				logger: &TestLogger::new(),
				maximum_pending_updates: 11,
				entropy_source: node_cfgs[0].keys_manager,
				signer_provider: node_cfgs[0].keys_manager,
			};
			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// correct result
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting new channel when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
				ChannelMonitorUpdateStatus::UnrecoverableError => {
					// correct result
				}
				ChannelMonitorUpdateStatus::Completed => {
					panic!("Completed persisting new channel when shouldn't have")
				}
				ChannelMonitorUpdateStatus::InProgress => {
					panic!("Returned InProgress when shouldn't have")
				}
			}
			added_monitors.clear();
		}
		nodes[1].node.get_and_clear_pending_msg_events();
	}
	// Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
	#[test]
	fn clean_stale_updates_works() {
		let test_max_pending_updates = 7;
		let chanmon_cfgs = create_chanmon_cfgs(3);
		let persister_0 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[0].keys_manager,
			signer_provider: &chanmon_cfgs[0].keys_manager,
		};
		let persister_1 = MonitorUpdatingPersister {
			kv_store: &TestStore::new(false),
			logger: &TestLogger::new(),
			maximum_pending_updates: test_max_pending_updates,
			entropy_source: &chanmon_cfgs[1].keys_manager,
			signer_provider: &chanmon_cfgs[1].keys_manager,
		};
		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let chain_mon_0 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[0].chain_source),
			&chanmon_cfgs[0].tx_broadcaster,
			&chanmon_cfgs[0].logger,
			&chanmon_cfgs[0].fee_estimator,
			&persister_0,
			&chanmon_cfgs[0].keys_manager,
		);
		let chain_mon_1 = test_utils::TestChainMonitor::new(
			Some(&chanmon_cfgs[1].chain_source),
			&chanmon_cfgs[1].tx_broadcaster,
			&chanmon_cfgs[1].logger,
			&chanmon_cfgs[1].fee_estimator,
			&persister_1,
			&chanmon_cfgs[1].keys_manager,
		);
		node_cfgs[0].chain_monitor = chain_mon_0;
		node_cfgs[1].chain_monitor = chain_mon_1;
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

		let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;

		// Check that the persisted channel data is empty before any channels are
		// open.
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
		assert_eq!(persisted_chan_data.len(), 0);

		// Create some initial channel
		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);

		// Send a few payments to advance the updates a bit
		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);

		// Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
		let (_, monitor) = &persisted_chan_data[0];
		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
			.is_err());

		// Force close.
		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
		check_closed_broadcast!(nodes[0], true);
		check_added_monitors!(nodes[0], 1);

		// Write an update near u64::MAX
		persister_0
			.kv_store
			.write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
			.unwrap();

		// Do the stale update cleanup
		persister_0.cleanup_stale_updates(false).unwrap();

		// Confirm the stale update is unreadable/gone
		assert!(persister_0
			.kv_store
			.read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
			.is_err());
	}
}