Merge pull request #2775 from benthecarman/sign-psbt
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index dbe3ee8161ef4498904709a0c7ad5080d1bcb7ac..a9f534ee4d3413df6489ea1988c13e75b13272b7 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
 use core::cmp;
 use core::convert::{TryFrom, TryInto};
 use core::ops::Deref;
-use bitcoin::hashes::hex::{FromHex, ToHex};
+use core::str::FromStr;
 use bitcoin::{BlockHash, Txid};
 
 use crate::{io, log_error};
 use crate::alloc::string::ToString;
-use crate::prelude::{Vec, String};
+use crate::prelude::*;
 
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
-use crate::sign::{EntropySource, NodeSigner, WriteableEcdsaChannelSigner, SignerProvider};
+use crate::sign::{EntropySource, NodeSigner, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
 use crate::ln::channelmanager::ChannelManager;
@@ -37,31 +37,31 @@ pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCD
 /// The maximum number of characters namespaces and keys may have.
 pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
 
-/// The namespace under which the [`ChannelManager`] will be persisted.
-pub const CHANNEL_MANAGER_PERSISTENCE_NAMESPACE: &str = "";
-/// The sub-namespace under which the [`ChannelManager`] will be persisted.
-pub const CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The primary namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
+/// The secondary namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 /// The key under which the [`ChannelManager`] will be persisted.
 pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
 
-/// The namespace under which [`ChannelMonitor`]s will be persisted.
-pub const CHANNEL_MONITOR_PERSISTENCE_NAMESPACE: &str = "monitors";
-/// The sub-namespace under which [`ChannelMonitor`]s will be persisted.
-pub const CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE: &str = "";
-/// The namespace under which [`ChannelMonitorUpdate`]s will be persisted.
-pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE: &str = "monitor_updates";
-
-/// The namespace under which the [`NetworkGraph`] will be persisted.
-pub const NETWORK_GRAPH_PERSISTENCE_NAMESPACE: &str = "";
-/// The sub-namespace under which the [`NetworkGraph`] will be persisted.
-pub const NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The primary namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
+/// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
+/// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
+pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
+
+/// The primary namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
+/// The secondary namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 /// The key under which the [`NetworkGraph`] will be persisted.
 pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
 
-/// The namespace under which the [`WriteableScore`] will be persisted.
-pub const SCORER_PERSISTENCE_NAMESPACE: &str = "";
-/// The sub-namespace under which the [`WriteableScore`] will be persisted.
-pub const SCORER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The primary namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
+/// The secondary namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 /// The key under which the [`WriteableScore`] will be persisted.
 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
 
@@ -74,35 +74,38 @@ pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
 /// Provides an interface that allows storage and retrieval of persisted values that are associated
 /// with given keys.
 ///
-/// In order to avoid collisions the key space is segmented based on the given `namespace`s and
-/// `sub_namespace`s. Implementations of this trait are free to handle them in different ways, as
-/// long as per-namespace key uniqueness is asserted.
+/// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s
+/// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
+/// ways, as long as per-namespace key uniqueness is asserted.
 ///
 /// Keys and namespaces are required to be valid ASCII strings in the range of
 /// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
-/// namespaces and sub-namespaces (`""`) are assumed to be a valid, however, if `namespace` is
-/// empty, `sub_namespace` is required to be empty, too. This means that concerns should always be
-/// separated by namespace first, before sub-namespaces are used. While the number of namespaces
-/// will be relatively small and is determined at compile time, there may be many sub-namespaces
-/// per namespace. Note that per-namespace uniqueness needs to also hold for keys *and*
-/// namespaces/sub-namespaces in any given namespace/sub-namespace, i.e., conflicts between keys
-/// and equally named namespaces/sub-namespaces must be avoided.
+/// primary namespaces and secondary namespaces (`""`) are assumed to be valid; however, if
+/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
+/// that concerns should always be separated by primary namespace first, before secondary
+/// namespaces are used. While the number of primary namespaces will be relatively small and is
+/// determined at compile time, there may be many secondary namespaces per primary namespace. Note
+/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given
+/// namespace, i.e., conflicts between keys and equally named
+/// primary namespaces/secondary namespaces must be avoided.
 ///
 /// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
-/// interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the
-/// data model previously assumed by `KVStorePersister::persist`.
+/// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
+/// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
 pub trait KVStore {
-       /// Returns the data stored for the given `namespace`, `sub_namespace`, and `key`.
+       /// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
+       /// `key`.
        ///
        /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
-       /// `namespace` and `sub_namespace`.
+       /// `primary_namespace` and `secondary_namespace`.
        ///
        /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
-       fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>>;
+       fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> Result<Vec<u8>, io::Error>;
        /// Persists the given data under the given `key`.
        ///
-       /// Will create the given `namespace` and `sub_namespace` if not already present in the store.
-       fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()>;
+       /// Will create the given `primary_namespace` and `secondary_namespace` if not already present
+       /// in the store.
+       fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> Result<(), io::Error>;
        /// Removes any data that had previously been persisted under the given `key`.
        ///
        /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
@@ -115,19 +118,21 @@ pub trait KVStore {
        /// potentially get lost on crash after the method returns. Therefore, this flag should only be
        /// set for `remove` operations that can be safely replayed at a later time.
        ///
-       /// Returns successfully if no data will be stored for the given `namespace`, `sub_namespace`, and
-       /// `key`, independently of whether it was present before its invokation or not.
-       fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()>;
-       /// Returns a list of keys that are stored under the given `sub_namespace` in `namespace`.
+       /// Returns successfully if no data will be stored for the given `primary_namespace`,
+       /// `secondary_namespace`, and `key`, independently of whether it was present before its
+       /// invocation or not.
+       fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> Result<(), io::Error>;
+       /// Returns a list of keys that are stored under the given `secondary_namespace` in
+       /// `primary_namespace`.
        ///
        /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
-       /// returned keys. Returns an empty list if `namespace` or `sub_namespace` is unknown.
-       fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>>;
+       /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
+       fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> Result<Vec<String>, io::Error>;
 }
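
For readers adapting a custom backend to the renamed parameters, here is a minimal, self-contained sketch (not part of the patch) of how the `primary_namespace`/`secondary_namespace` segmentation can be mapped onto a flat key space, using the `[{primary_namespace}/[{secondary_namespace}/]]{key}` concatenation from the migration note above. The `InMemoryStore` type is hypothetical and omits `remove`/`list` as well as the key-alphabet and length validation a real `KVStore` backend must perform.

```rust
use std::collections::HashMap;
use std::io;
use std::sync::Mutex;

/// Hypothetical backend keeping everything in one flat map.
struct InMemoryStore {
    // Keyed by "[{primary_namespace}/[{secondary_namespace}/]]{key}", i.e. the same
    // concatenation the pre-v0.0.117 `KVStorePersister` interface assumed.
    entries: Mutex<HashMap<String, Vec<u8>>>,
}

impl InMemoryStore {
    fn flat_key(primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<String> {
        if primary_namespace.is_empty() && !secondary_namespace.is_empty() {
            // Per the trait docs, an empty primary namespace requires an empty secondary namespace.
            return Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid namespace combination"));
        }
        let mut flat = String::new();
        if !primary_namespace.is_empty() {
            flat.push_str(primary_namespace);
            flat.push('/');
        }
        if !secondary_namespace.is_empty() {
            flat.push_str(secondary_namespace);
            flat.push('/');
        }
        flat.push_str(key);
        Ok(flat)
    }

    fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
        let flat = Self::flat_key(primary_namespace, secondary_namespace, key)?;
        self.entries
            .lock()
            .unwrap()
            .get(&flat)
            .cloned()
            // Returning `NotFound` exactly when the key is absent is what the
            // `MonitorUpdatingPersister` documentation below insists on.
            .ok_or_else(|| io::Error::new(io::ErrorKind::NotFound, "key not found"))
    }

    fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
        let flat = Self::flat_key(primary_namespace, secondary_namespace, key)?;
        self.entries.lock().unwrap().insert(flat, buf.to_vec());
        Ok(())
    }
}
```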
 
 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
 pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
-       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
                NS::Target: 'static + NodeSigner,
@@ -148,7 +153,7 @@ pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F:
 
 
 impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
-       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
                NS::Target: 'static + NodeSigner,
@@ -159,26 +164,26 @@ impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Der
 {
        /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
        fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
-               self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
-                                  CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
-                                  CHANNEL_MANAGER_PERSISTENCE_KEY,
-                                  &channel_manager.encode())
+               self.write(CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
+                       CHANNEL_MANAGER_PERSISTENCE_KEY,
+                       &channel_manager.encode())
        }
 
        /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
        fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
-               self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
-                                  NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
-                                  NETWORK_GRAPH_PERSISTENCE_KEY,
-                                  &network_graph.encode())
+               self.write(NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
+                       NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
+                       NETWORK_GRAPH_PERSISTENCE_KEY,
+                       &network_graph.encode())
        }
 
        /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
        fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
-               self.write(SCORER_PERSISTENCE_NAMESPACE,
-                                  SCORER_PERSISTENCE_SUB_NAMESPACE,
-                                  SCORER_PERSISTENCE_KEY,
-                                  &scorer.encode())
+               self.write(SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
+                       SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
+                       SCORER_PERSISTENCE_KEY,
+                       &scorer.encode())
        }
 }
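
Taken together with the constants above, this blanket `Persister` implementation writes three well-known objects to fixed locations. A small illustrative sketch of the resulting flat keys under the concatenation scheme (the string literals simply restate the persistence constants from this file; `flatten` is a hypothetical helper):

```rust
fn flatten(primary: &str, secondary: &str, key: &str) -> String {
    [primary, secondary, key]
        .into_iter()
        .filter(|s| !s.is_empty())
        .collect::<Vec<_>>()
        .join("/")
}

fn main() {
    // (primary_namespace, secondary_namespace, key) triples used by the impl above.
    assert_eq!(flatten("", "", "manager"), "manager");
    assert_eq!(flatten("", "", "network_graph"), "network_graph");
    assert_eq!(flatten("", "", "scorer"), "scorer");
    // ChannelMonitors, by contrast, live under the "monitors" primary namespace:
    assert_eq!(flatten("monitors", "", "some_txid_1"), "monitors/some_txid_1");
}
```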
 
@@ -189,10 +194,10 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
        // just shut down the node since we're not retrying persistence!
 
        fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
                match self.write(
-                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                        &key, &monitor.encode())
                {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
@@ -201,10 +206,10 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
        }
 
        fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
                match self.write(
-                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                        &key, &monitor.encode())
                {
                        Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
@@ -216,7 +221,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
 /// Read previously persisted [`ChannelMonitor`]s from the store.
 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
        kv_store: K, entropy_source: ES, signer_provider: SP,
-) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
 where
        K::Target: KVStore,
        ES::Target: EntropySource + Sized,
@@ -225,7 +230,7 @@ where
        let mut res = Vec::new();
 
        for stored_key in kv_store.list(
-               CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
+               CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE)?
        {
                if stored_key.len() < 66 {
                        return Err(io::Error::new(
@@ -233,7 +238,7 @@ where
                                "Stored key has invalid length"));
                }
 
-               let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
+               let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
                        io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
                })?;
 
@@ -241,9 +246,9 @@ where
                        io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
                })?;
 
-               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
                        &mut io::Cursor::new(
-                               kv_store.read(CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE, &stored_key)?),
+                               kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
                        (&*entropy_source, &*signer_provider),
                ) {
                        Ok((block_hash, channel_monitor)) => {
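
The key parsing performed by `read_channel_monitors` can be summarized by the self-contained sketch below. It is illustrative only: the real code validates the hex portion via `Txid::from_str`, whereas this sketch merely checks the character class, and `parse_stored_key` is a hypothetical helper name.

```rust
use std::io;

// Stored monitor keys look like "{64-hex-char funding txid}_{output index}", so
// anything shorter than 66 characters (64 + '_' + at least one digit) is rejected.
fn parse_stored_key(stored_key: &str) -> io::Result<(String, u16)> {
    // Keys are restricted to KVSTORE_NAMESPACE_KEY_ALPHABET, so they are ASCII;
    // the sketch rejects anything else up front to keep the slicing below safe.
    if !stored_key.is_ascii() || stored_key.len() < 66 {
        return Err(io::Error::new(io::ErrorKind::InvalidData, "Stored key has invalid length"));
    }
    let (txid_hex, rest) = stored_key.split_at(64);
    if !txid_hex.chars().all(|c| c.is_ascii_hexdigit()) {
        return Err(io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key"));
    }
    let index = rest
        .strip_prefix('_')
        .and_then(|s| s.parse::<u16>().ok())
        .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key"))?;
    Ok((txid_hex.to_string(), index))
}

fn main() -> io::Result<()> {
    let (txid, index) = parse_stored_key(
        "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1",
    )?;
    assert_eq!(txid.len(), 64);
    assert_eq!(index, 1);
    Ok(())
}
```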
@@ -294,15 +299,15 @@ where
 ///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
 ///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
 ///
-/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_NAMESPACE`], using the
-/// familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
+/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`],
+/// using the familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
 ///
-/// Each [`ChannelMonitorUpdate`] is stored in a dynamic sub-namespace, as follows:
+/// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
 ///
-///   - namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE`]
-///   - sub-namespace: [the monitor's encoded outpoint name]
+///   - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`]
+///   - secondary namespace: [the monitor's encoded outpoint name]
 ///
-/// Under that sub-namespace, each update is stored with a number string, like `21`, which
+/// Under that secondary namespace, each update is stored with a number string, like `21`, which
 /// represents its `update_id` value.
 ///
 /// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
@@ -312,14 +317,14 @@ where
 ///
 /// Full channel monitors would be stored at a single key:
 ///
-/// `[CHANNEL_MONITOR_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
+/// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
 ///
-/// Updates would be stored as follows (with `/` delimiting namespace/sub-namespace/key):
+/// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
 ///
 /// ```text
-/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
-/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
-/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
+/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
+/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
+/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
 /// ```
 /// ... and so on.
 ///
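
The layout described in the doc comment above can be made concrete with a short sketch. The helper names are hypothetical; the namespace strings restate `CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE` (`"monitors"`) and `CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE` (`"monitor_updates"`), along with the `{txid}_{index}` monitor naming and decimal `update_id` keys used in this file.

```rust
// Hypothetical helpers mirroring MonitorName / UpdateName.
fn monitor_name(funding_txid_hex: &str, output_index: u16) -> String {
    format!("{}_{}", funding_txid_hex, output_index)
}

fn update_name(update_id: u64) -> String {
    update_id.to_string()
}

fn main() {
    let name = monitor_name(
        "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
        1,
    );
    // Full monitor: primary namespace "monitors", empty secondary namespace, key = name.
    println!("monitors/{name}");
    // Update #21: primary namespace "monitor_updates", secondary namespace = name, key = "21".
    println!("monitor_updates/{name}/{}", update_name(21));
}
```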
@@ -329,9 +334,9 @@ where
 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
 /// list channel monitors themselves and load channels individually using
 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
-/// 
+///
 /// ## EXTREMELY IMPORTANT
-/// 
+///
 /// It is extremely important that your [`KVStore::read`] implementation uses the
 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
 /// that circumstance (not when there is really a permissions error, for example). This is because
@@ -380,7 +385,7 @@ where
        /// consolidation will frequently occur with fewer updates than what you set here; this number
        /// is merely the maximum that may be stored. When setting this value, consider that for higher
        /// values of `maximum_pending_updates`:
-       /// 
+       ///
        ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
        /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
        /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
@@ -392,11 +397,7 @@ where
        pub fn new(
                kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
                signer_provider: SP,
-       ) -> Self
-       where
-               ES::Target: EntropySource + Sized,
-               SP::Target: SignerProvider + Sized,
-       {
+       ) -> Self {
                MonitorUpdatingPersister {
                        kv_store,
                        logger,
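
As a rough illustration of the `maximum_pending_updates` trade-off documented above (an estimate only, assuming a steady stream of updates on a single channel and ignoring the earlier consolidation the docs say can occur):

```rust
// Approximate write counts for `n_updates` ChannelMonitorUpdates: every update is
// written individually, and a full ChannelMonitor write happens roughly once per
// `maximum_pending_updates` updates.
fn approx_writes(n_updates: u64, maximum_pending_updates: u64) -> (u64, u64) {
    (n_updates, n_updates / maximum_pending_updates)
}

fn main() {
    let (update_writes, monitor_writes) = approx_writes(10_000, 100);
    // Larger values of `maximum_pending_updates` mean fewer full monitor writes,
    // at the cost of more update entries to read back and clean up later.
    println!("~{update_writes} update writes, ~{monitor_writes} full monitor writes");
}
```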
@@ -411,24 +412,22 @@ where
        /// It is extremely important that your [`KVStore::read`] implementation uses the
        /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
        /// documentation for [`MonitorUpdatingPersister`].
-       pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref + Clone>(
-               &self, broadcaster: B, fee_estimator: F,
-       ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+       pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
+               &self, broadcaster: &B, fee_estimator: &F,
+       ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
        where
-               ES::Target: EntropySource + Sized,
-               SP::Target: SignerProvider + Sized,
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
        {
                let monitor_list = self.kv_store.list(
-                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                )?;
                let mut res = Vec::with_capacity(monitor_list.len());
                for monitor_key in monitor_list {
                        res.push(self.read_channel_monitor_with_updates(
-                               &broadcaster,
-                               fee_estimator.clone(),
+                               broadcaster,
+                               fee_estimator,
                                monitor_key,
                        )?)
                }
@@ -449,15 +448,13 @@ where
        ///
        /// The correct `monitor_key` would be:
        /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
-       /// 
+       ///
        /// Loading a large number of monitors will be faster if done in parallel. You can use this
        /// function to accomplish this. Take care to limit the number of parallel readers.
-       pub fn read_channel_monitor_with_updates<B: Deref, F: Deref + Clone>(
-               &self, broadcaster: &B, fee_estimator: F, monitor_key: String,
-       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
+       pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
+               &self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
+       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
        where
-               ES::Target: EntropySource + Sized,
-               SP::Target: SignerProvider + Sized,
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
        {
@@ -479,7 +476,7 @@ where
                                Err(err) => return Err(err),
                        };
 
-                       monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
+                       monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
                                .map_err(|e| {
                                        log_error!(
                                                self.logger,
@@ -497,18 +494,18 @@ where
        /// Read a channel monitor.
        fn read_monitor(
                &self, monitor_name: &MonitorName,
-       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error> {
+       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
                let outpoint: OutPoint = monitor_name.try_into()?;
                let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
-                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                        monitor_name.as_str(),
                )?);
                // Discard the sentinel bytes if found.
                if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
                        monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
                }
-               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
                        &mut monitor_cursor,
                        (&*self.entropy_source, &*self.signer_provider),
                ) {
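
The sentinel handling in `read_monitor` pairs with `persist_new_channel` further down, which prepends `MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL` (`[0xFF; 2]`, per the hunk context above) to the serialized monitor. A self-contained sketch of just those mechanics (the helper names are hypothetical):

```rust
// Mirrors MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL from this file.
const SENTINEL: &[u8] = &[0xFF; 2];

// Writer side: prepend the sentinel before the serialized ChannelMonitor bytes.
fn frame_monitor(serialized_monitor: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(SENTINEL.len() + serialized_monitor.len());
    out.extend_from_slice(SENTINEL);
    out.extend_from_slice(serialized_monitor);
    out
}

// Reader side: skip the sentinel if present, as read_monitor does with its cursor.
fn strip_sentinel(stored: &[u8]) -> &[u8] {
    if stored.starts_with(SENTINEL) { &stored[SENTINEL.len()..] } else { stored }
}

fn main() {
    let framed = frame_monitor(b"monitor-bytes");
    assert_eq!(strip_sentinel(&framed), b"monitor-bytes");
}
```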
@@ -546,7 +543,7 @@ where
                &self, monitor_name: &MonitorName, update_name: &UpdateName,
        ) -> Result<ChannelMonitorUpdate, io::Error> {
                let update_bytes = self.kv_store.read(
-                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
+                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                        monitor_name.as_str(),
                        update_name.as_str(),
                )?;
@@ -554,7 +551,7 @@ where
                        log_error!(
                                self.logger,
                                "Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
-                               CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
+                               CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                monitor_name.as_str(),
                                update_name.as_str(),
                                e,
@@ -571,21 +568,21 @@ where
        /// be passed to [`KVStore::remove`].
        pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
                let monitor_keys = self.kv_store.list(
-                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                )?;
                for monitor_key in monitor_keys {
                        let monitor_name = MonitorName::new(monitor_key)?;
                        let (_, current_monitor) = self.read_monitor(&monitor_name)?;
                        let updates = self
                                .kv_store
-                               .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str())?;
+                               .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str())?;
                        for update in updates {
                                let update_name = UpdateName::new(update)?;
                                // if the update_id is lower than the stored monitor, delete
                                if update_name.0 <= current_monitor.get_latest_update_id() {
                                        self.kv_store.remove(
-                                               CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
+                                               CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                monitor_name.as_str(),
                                                update_name.as_str(),
                                                lazy,
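
The staleness rule applied by `cleanup_stale_updates` above reduces to comparing each stored update's `update_id` against the latest update id of the persisted full monitor. A minimal sketch of just that filter (a hypothetical helper, not the method itself):

```rust
// An update entry is stale once the persisted full monitor already includes it,
// i.e. its update_id is less than or equal to the monitor's latest update id.
fn stale_update_ids(stored_update_ids: &[u64], latest_monitor_update_id: u64) -> Vec<u64> {
    stored_update_ids
        .iter()
        .copied()
        .filter(|&id| id <= latest_monitor_update_id)
        .collect()
}

fn main() {
    // With the monitor at update id 5, updates 1..=5 can be removed; 6 and 7 must stay.
    assert_eq!(stale_update_ids(&[1, 3, 5, 6, 7], 5), vec![1, 3, 5]);
}
```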
@@ -597,7 +594,7 @@ where
        }
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref> 
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
        Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
 where
        K::Target: KVStore,
@@ -638,8 +635,8 @@ where
                monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
                monitor.write(&mut monitor_bytes).unwrap();
                match self.kv_store.write(
-                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                        monitor_name.as_str(),
                        &monitor_bytes,
                ) {
@@ -683,7 +680,7 @@ where
                                                        // stale updates, so do nothing.
                                                }
                                                if let Err(e) = self.kv_store.remove(
-                                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
+                                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                        monitor_name.as_str(),
                                                        update_name.as_str(),
                                                        true,
@@ -703,8 +700,8 @@ where
                                log_error!(
                                        self.logger,
                                        "error writing channel monitor {}/{}/{} reason: {}",
-                                       CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
-                                       CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+                                       CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+                                       CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                                        monitor_name.as_str(),
                                        e
                                );
@@ -735,7 +732,7 @@ where
                                let monitor_name = MonitorName::from(funding_txo);
                                let update_name = UpdateName::from(update.update_id);
                                match self.kv_store.write(
-                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
+                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                        monitor_name.as_str(),
                                        update_name.as_str(),
                                        &update.encode(),
@@ -745,7 +742,7 @@ where
                                                log_error!(
                                                        self.logger,
                                                        "error writing channel monitor update {}/{}/{} reason: {}",
-                                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
+                                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                        monitor_name.as_str(),
                                                        update_name.as_str(),
                                                        e
@@ -783,7 +780,7 @@ impl MonitorName {
        fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
                let mut parts = name.splitn(2, '_');
                let txid = if let Some(part) = parts.next() {
-                       Txid::from_hex(part).map_err(|_| {
+                       Txid::from_str(part).map_err(|_| {
                                io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
                        })?
                } else {
@@ -816,7 +813,7 @@ impl TryFrom<&MonitorName> for OutPoint {
 
 impl From<OutPoint> for MonitorName {
        fn from(value: OutPoint) -> Self {
-               MonitorName(format!("{}_{}", value.txid.to_hex(), value.index))
+               MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
        }
 }
 
@@ -876,13 +873,13 @@ mod tests {
        #[test]
        fn monitor_from_outpoint_works() {
                let monitor_name1 = MonitorName::from(OutPoint {
-                       txid: Txid::from_hex("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
+                       txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
                        index: 1,
                });
                assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
 
                let monitor_name2 = MonitorName::from(OutPoint {
-                       txid: Txid::from_hex("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
+                       txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
                        index: u16::MAX,
                });
                assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
@@ -944,17 +941,17 @@ mod tests {
                // Check that the persisted channel data is empty before any channels are
                // open.
                let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
-                       broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+                       &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                assert_eq!(persisted_chan_data_0.len(), 0);
                let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
-                       broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+                       &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
                assert_eq!(persisted_chan_data_1.len(), 0);
 
                // Helper to make sure the channel is on the expected update ID.
                macro_rules! check_persisted_data {
                        ($expected_update_id: expr) => {
                                persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
-                                       broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+                                       &broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                                // check that we stored only one monitor
                                assert_eq!(persisted_chan_data_0.len(), 1);
                                for (_, mon) in persisted_chan_data_0.iter() {
@@ -965,7 +962,7 @@ mod tests {
                                        let (_, cm_0) = persister_0.read_monitor(&monitor_name).unwrap();
                                        if cm_0.get_latest_update_id() == $expected_update_id {
                                                assert_eq!(
-                                                       persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
+                                                       persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                                monitor_name.as_str()).unwrap().len(),
                                                        0,
                                                        "updates stored when they shouldn't be in persister 0"
@@ -973,7 +970,7 @@ mod tests {
                                        }
                                }
                                persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
-                                       broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+                                       &broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
                                assert_eq!(persisted_chan_data_1.len(), 1);
                                for (_, mon) in persisted_chan_data_1.iter() {
                                        assert_eq!(mon.get_latest_update_id(), $expected_update_id);
@@ -981,7 +978,7 @@ mod tests {
                                        let (_, cm_1) = persister_1.read_monitor(&monitor_name).unwrap();
                                        if cm_1.get_latest_update_id() == $expected_update_id {
                                                assert_eq!(
-                                                       persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE,
+                                                       persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                                monitor_name.as_str()).unwrap().len(),
                                                        0,
                                                        "updates stored when they shouldn't be in persister 1"
@@ -1038,12 +1035,12 @@ mod tests {
                check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
 
                // Make sure the expected number of stale updates is present.
-               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                let (_, monitor) = &persisted_chan_data[0];
                let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
                // The channel should have 0 updates, as it wrote a full monitor and consolidated.
-               assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
-               assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
+               assert_eq!(persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
+               assert_eq!(persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str()).unwrap().len(), 0);
        }
 
        // Test that if the `MonitorUpdatingPersister`'s can't actually write, trying to persist a
@@ -1065,7 +1062,7 @@ mod tests {
                        let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
                        let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
                        let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
-                       let test_txo = OutPoint { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
+                       let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
 
                        let ro_persister = MonitorUpdatingPersister {
                                kv_store: &TestStore::new(true),
@@ -1146,7 +1143,7 @@ mod tests {
 
                // Check that the persisted channel data is empty before any channels are
                // open.
-               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                assert_eq!(persisted_chan_data.len(), 0);
 
                // Create some initial channel
@@ -1157,12 +1154,12 @@ mod tests {
                send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
 
                // Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
-               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+               let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
                let (_, monitor) = &persisted_chan_data[0];
                let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
                persister_0
                        .kv_store
-                       .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
+                       .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str(), &[0u8; 1])
                        .unwrap();
 
                // Do the stale update cleanup
@@ -1171,7 +1168,7 @@ mod tests {
                // Confirm the stale update is unreadable/gone
                assert!(persister_0
                        .kv_store
-                       .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
+                       .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(1).as_str())
                        .is_err());
 
                // Force close.
@@ -1183,7 +1180,7 @@ mod tests {
                // Write an update near u64::MAX
                persister_0
                        .kv_store
-                       .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
+                       .write(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str(), &[0u8; 1])
                        .unwrap();
 
                // Do the stale update cleanup
@@ -1192,7 +1189,7 @@ mod tests {
                // Confirm the stale update is unreadable/gone
                assert!(persister_0
                        .kv_store
-                       .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
+                       .read(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_name.as_str(), UpdateName::from(u64::MAX - 1).as_str())
                        .is_err());
        }
 }