Merge pull request #2775 from benthecarman/sign-psbt
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 35e473c42a690f47a216b87d344ab6a3ace87999..a9f534ee4d3413df6489ea1988c13e75b13272b7 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -11,7 +11,7 @@
 use core::cmp;
 use core::convert::{TryFrom, TryInto};
 use core::ops::Deref;
-use bitcoin::hashes::hex::{FromHex, ToHex};
+use core::str::FromStr;
 use bitcoin::{BlockHash, Txid};
 
 use crate::{io, log_error};
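
Note: the first hunk swaps rust-bitcoin's crate-local hex traits for the standard library's `FromStr`. Newer rust-bitcoin releases dropped `FromHex`/`ToHex` on `Txid` in favor of `FromStr` for parsing and `Display` for formatting. A minimal before/after sketch, assuming a rust-bitcoin version with this API (e.g. the 0.30 line):

```rust
use core::str::FromStr;
use bitcoin::Txid;

fn hex_round_trip() {
    let s = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef";
    // Previously: Txid::from_hex(s) and txid.to_hex().
    let txid = Txid::from_str(s).expect("64 hex chars");
    // `Display` now produces the same reversed-hex string `to_hex()` did.
    assert_eq!(txid.to_string(), s);
}
```
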
@@ -21,7 +21,7 @@ use crate::prelude::*;
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
-use crate::sign::{EntropySource, NodeSigner, WriteableEcdsaChannelSigner, SignerProvider};
+use crate::sign::{EntropySource, NodeSigner, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
 use crate::ln::channelmanager::ChannelManager;
@@ -132,7 +132,7 @@ pub trait KVStore {
 
 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
 pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
-       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
                NS::Target: 'static + NodeSigner,
@@ -153,7 +153,7 @@ pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F:
 
 
 impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
-       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+       where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
                T::Target: 'static + BroadcasterInterface,
                ES::Target: 'static + EntropySource,
                NS::Target: 'static + NodeSigner,
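
Note: these hunks track a reorganization on the LDK side: `WriteableEcdsaChannelSigner` now lives in the `sign::ecdsa` submodule, and the `SignerProvider::Signer` associated type was renamed to `EcdsaSigner`. A hypothetical downstream bound, only to show how generic code spells the type after the rename:

```rust
use core::ops::Deref;
use lightning::chain::channelmonitor::ChannelMonitor;
use lightning::sign::SignerProvider;

// `<SP::Target as SignerProvider>::Signer` becomes
// `<SP::Target as SignerProvider>::EcdsaSigner`; the trait bounding it,
// `WriteableEcdsaChannelSigner`, is imported from `lightning::sign::ecdsa`.
fn takes_monitor<SP: Deref>(
    _monitor: &ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>,
) where
    SP::Target: SignerProvider,
{
}
```
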
@@ -194,7 +194,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
        // just shut down the node since we're not retrying persistence!
 
        fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
                match self.write(
                        CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
                        CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
@@ -206,7 +206,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
        }
 
        fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-               let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+               let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
                match self.write(
                        CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
                        CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
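
Note: both `Persist` methods derive the storage key from the funding outpoint as `<txid>_<index>`; with `ToHex` gone, `Txid`'s `Display` impl supplies the hex (the explicit `to_string()` in the hunks above is equivalent to formatting with `{}`). A standalone sketch of the key derivation, using a hypothetical helper rather than the actual method bodies:

```rust
use core::str::FromStr;
use bitcoin::Txid;
use lightning::chain::transaction::OutPoint;

// Illustrative helper, not part of the API: keys take the form
// "<64-char reversed-hex txid>_<output index>".
fn monitor_storage_key(funding_txo: &OutPoint) -> String {
    format!("{}_{}", funding_txo.txid, funding_txo.index)
}

fn key_example() {
    let txid = Txid::from_str(
        "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
    ).unwrap();
    assert_eq!(
        monitor_storage_key(&OutPoint { txid, index: 1 }),
        "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1",
    );
}
```
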
@@ -221,7 +221,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSign
 /// Read previously persisted [`ChannelMonitor`]s from the store.
 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
        kv_store: K, entropy_source: ES, signer_provider: SP,
-) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
 where
        K::Target: KVStore,
        ES::Target: EntropySource + Sized,
@@ -238,7 +238,7 @@ where
                                "Stored key has invalid length"));
                }
 
-               let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
+               let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
                        io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
                })?;
 
@@ -246,7 +246,7 @@ where
                        io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
                })?;
 
-               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
                        &mut io::Cursor::new(
                                kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
                        (&*entropy_source, &*signer_provider),
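
Note: `read_channel_monitors` reverses that mapping: after a length check it splits the stored key at character 64, parsing the left half with `Txid::from_str` (replacing `FromHex::from_hex`) and the remainder as the output index. A self-contained sketch of that parse, with the error handling condensed and the exact length threshold assumed:

```rust
use core::str::FromStr;
use std::io;
use bitcoin::Txid;

// Hypothetical helper mirroring the reader's key handling.
fn parse_stored_key(stored_key: &str) -> Result<(Txid, u16), io::Error> {
    let invalid = |msg: &str| io::Error::new(io::ErrorKind::InvalidData, msg.to_string());
    if stored_key.len() < 66 {
        return Err(invalid("Stored key has invalid length"));
    }
    let (txid_hex, rest) = stored_key.split_at(64); // "<txid>" / "_<index>"
    let txid = Txid::from_str(txid_hex).map_err(|_| invalid("Invalid tx ID in stored key"))?;
    let index = rest[1..].parse::<u16>().map_err(|_| invalid("Invalid tx index in stored key"))?;
    Ok((txid, index))
}
```
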
@@ -334,9 +334,9 @@ where
 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
 /// list channel monitors themselves and load channels individually using
 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
-/// 
+///
 /// ## EXTREMELY IMPORTANT
-/// 
+///
 /// It is extremely important that your [`KVStore::read`] implementation uses the
 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
 /// that circumstance (not when there is really a permissions error, for example). This is because
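
Note: the doc comment above carries the key operational requirement for this persister. A minimal sketch of a conforming `read`, assuming a hypothetical filesystem-backed store rooted at `base`:

```rust
use std::io;
use std::path::Path;

// `NotFound` must surface exactly when the key is absent, and never for
// unrelated failures such as permission errors.
fn kvstore_read(base: &Path, primary: &str, secondary: &str, key: &str) -> io::Result<Vec<u8>> {
    let path = base.join(primary).join(secondary).join(key);
    // `std::fs::read` already yields `ErrorKind::NotFound` only for a
    // missing file and distinct kinds otherwise, so no remapping is needed.
    std::fs::read(path)
}
```
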
@@ -385,7 +385,7 @@ where
        /// consolidation will frequently occur with fewer updates than what you set here; this number
        /// is merely the maximum that may be stored. When setting this value, consider that for higher
        /// values of `maximum_pending_updates`:
-       /// 
+       ///
        ///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
        /// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
        /// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
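
Note: the surrounding doc comment explains the sizing trade-off for `maximum_pending_updates`. Back-of-envelope arithmetic for the write pattern it describes, as a hypothetical helper (not part of the API):

```rust
// For roughly `n_updates` updates on one channel, expect about one update
// write each, plus a full `ChannelMonitor` write (a consolidation) every
// `maximum_pending_updates` updates.
fn approx_writes(n_updates: u64, maximum_pending_updates: u64) -> (u64, u64) {
    (n_updates, n_updates / maximum_pending_updates)
}

// e.g. approx_writes(1_000, 100) == (1_000, 10): larger values mean fewer
// full-monitor writes but more update records to replay and clean up.
```
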
@@ -414,7 +414,7 @@ where
        /// documentation for [`MonitorUpdatingPersister`].
        pub fn read_all_channel_monitors_with_updates<B: Deref, F: Deref>(
                &self, broadcaster: &B, fee_estimator: &F,
-       ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+       ) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
        where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
@@ -448,12 +448,12 @@ where
        ///
        /// The correct `monitor_key` would be:
        /// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
-       /// 
+       ///
        /// Loading a large number of monitors will be faster if done in parallel. You can use this
        /// function to accomplish this. Take care to limit the number of parallel readers.
        pub fn read_channel_monitor_with_updates<B: Deref, F: Deref>(
                &self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
-       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
+       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
        where
                B::Target: BroadcasterInterface,
                F::Target: FeeEstimator,
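
Note: the `monitor_key` format in the doc comment above matches the storage-key sketches earlier. Hypothetical usage, assuming `persister`, `broadcaster`, and `fee_estimator` are already constructed with types satisfying the listed bounds:

```rust
// Keys can be listed from the store and loaded individually; per the docs,
// fan this out across workers but cap the number of parallel readers.
let monitor_key = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1".to_string();
let (best_block_hash, monitor) =
    persister.read_channel_monitor_with_updates(&broadcaster, &fee_estimator, monitor_key)?;
```
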
@@ -494,7 +494,7 @@ where
        /// Read a channel monitor.
        fn read_monitor(
                &self, monitor_name: &MonitorName,
-       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error> {
+       ) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
                let outpoint: OutPoint = monitor_name.try_into()?;
                let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
                        CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -505,7 +505,7 @@ where
                if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
                        monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
                }
-               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+               match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
                        &mut monitor_cursor,
                        (&*self.entropy_source, &*self.signer_provider),
                ) {
@@ -594,7 +594,7 @@ where
        }
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref> 
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
        Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
 where
        K::Target: KVStore,
@@ -780,7 +780,7 @@ impl MonitorName {
        fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
                let mut parts = name.splitn(2, '_');
                let txid = if let Some(part) = parts.next() {
-                       Txid::from_hex(part).map_err(|_| {
+                       Txid::from_str(part).map_err(|_| {
                                io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
                        })?
                } else {
@@ -813,7 +813,7 @@ impl TryFrom<&MonitorName> for OutPoint {
 
 impl From<OutPoint> for MonitorName {
        fn from(value: OutPoint) -> Self {
-               MonitorName(format!("{}_{}", value.txid.to_hex(), value.index))
+               MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
        }
 }
 
@@ -873,13 +873,13 @@ mod tests {
        #[test]
        fn monitor_from_outpoint_works() {
                let monitor_name1 = MonitorName::from(OutPoint {
-                       txid: Txid::from_hex("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
+                       txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
                        index: 1,
                });
                assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
 
                let monitor_name2 = MonitorName::from(OutPoint {
-                       txid: Txid::from_hex("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
+                       txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
                        index: u16::MAX,
                });
                assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
@@ -1062,7 +1062,7 @@ mod tests {
                        let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
                        let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
                        let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
-                       let test_txo = OutPoint { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
+                       let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
 
                        let ro_persister = MonitorUpdatingPersister {
                                kv_store: &TestStore::new(true),