diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 2a022c37..a9f534ee 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -11,7 +11,7 @@ use core::cmp;
 use core::convert::{TryFrom, TryInto};
 use core::ops::Deref;
-use bitcoin::hashes::hex::{FromHex, ToHex};
+use core::str::FromStr;
 use bitcoin::{BlockHash, Txid};
 use crate::{io, log_error};
@@ -21,7 +21,7 @@ use crate::prelude::*;
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
-use crate::sign::{EntropySource, NodeSigner, WriteableEcdsaChannelSigner, SignerProvider};
+use crate::sign::{EntropySource, NodeSigner, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
 use crate::ln::channelmanager::ChannelManager;
@@ -132,7 +132,7 @@ pub trait KVStore {
 /// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
 pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>>
-	where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+	where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
 		T::Target: 'static + BroadcasterInterface,
 		ES::Target: 'static + EntropySource,
 		NS::Target: 'static + NodeSigner,
@@ -153,7 +153,7 @@ pub trait Persister<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F:
 impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
-	where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
+	where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
 		T::Target: 'static + BroadcasterInterface,
 		ES::Target: 'static + EntropySource,
 		NS::Target: 'static + NodeSigner,
@@ -194,7 +194,7 @@ impl Persist
 	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-		let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
 		match self.write(
 			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
 			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
@@ -206,7 +206,7 @@ impl Persist
 	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
-		let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
 		match self.write(
 			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
 			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
@@ -221,7 +221,7 @@ impl Persist
 pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
 	kv_store: K, entropy_source: ES, signer_provider: SP,
-) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
 where
 	K::Target: KVStore,
 	ES::Target: EntropySource + Sized,
@@ -238,7 +238,7 @@ where
 				"Stored key has invalid length"));
 		}
-		let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
+		let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
 			io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
 		})?;
@@ -246,7 +246,7 @@ where
 			io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
 		})?;
-		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
 			&mut io::Cursor::new(
 				kv_store.read(CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, &stored_key)?),
 			(&*entropy_source, &*signer_provider),
@@ -334,9 +334,9 @@ where
 /// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
 /// list channel monitors themselves and load channels individually using
 /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
-/// 
+///
 /// ## EXTREMELY IMPORTANT
-/// 
+///
 /// It is extremely important that your [`KVStore::read`] implementation uses the
 /// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
 /// that circumstance (not when there is really a permissions error, for example). This is because
@@ -385,7 +385,7 @@ where
 	/// consolidation will frequently occur with fewer updates than what you set here; this number
 	/// is merely the maximum that may be stored. When setting this value, consider that for higher
 	/// values of `maximum_pending_updates`:
-	/// 
+	///
 	/// - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
 	///   [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
 	///   `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
@@ -397,11 +397,7 @@ where
 	pub fn new(
 		kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
 		signer_provider: SP,
-	) -> Self
-	where
-		ES::Target: EntropySource + Sized,
-		SP::Target: SignerProvider + Sized,
-	{
+	) -> Self {
 		MonitorUpdatingPersister {
 			kv_store,
 			logger,
@@ -416,12 +412,10 @@ where
 	/// It is extremely important that your [`KVStore::read`] implementation uses the
 	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
 	/// documentation for [`MonitorUpdatingPersister`].
-	pub fn read_all_channel_monitors_with_updates(
-		&self, broadcaster: B, fee_estimator: F,
-	) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>, io::Error>
+	pub fn read_all_channel_monitors_with_updates(
+		&self, broadcaster: &B, fee_estimator: &F,
+	) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
 	where
-		ES::Target: EntropySource + Sized,
-		SP::Target: SignerProvider + Sized,
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 	{
@@ -432,8 +426,8 @@ where
 		let mut res = Vec::with_capacity(monitor_list.len());
 		for monitor_key in monitor_list {
 			res.push(self.read_channel_monitor_with_updates(
-				&broadcaster,
-				fee_estimator.clone(),
+				broadcaster,
+				fee_estimator,
 				monitor_key,
 			)?)
 		}
@@ -454,15 +448,13 @@ where
 	///
 	/// The correct `monitor_key` would be:
 	/// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
-	/// 
+	///
 	/// Loading a large number of monitors will be faster if done in parallel. You can use this
 	/// function to accomplish this. Take care to limit the number of parallel readers.
-	pub fn read_channel_monitor_with_updates(
-		&self, broadcaster: &B, fee_estimator: F, monitor_key: String,
-	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error>
+	pub fn read_channel_monitor_with_updates(
+		&self, broadcaster: &B, fee_estimator: &F, monitor_key: String,
+	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
 	where
-		ES::Target: EntropySource + Sized,
-		SP::Target: SignerProvider + Sized,
 		B::Target: BroadcasterInterface,
 		F::Target: FeeEstimator,
 	{
@@ -484,7 +476,7 @@ where
 				Err(err) => return Err(err),
 			};
-			monitor.update_monitor(&update, broadcaster, fee_estimator.clone(), &self.logger)
+			monitor.update_monitor(&update, broadcaster, fee_estimator, &self.logger)
 				.map_err(|e| {
 					log_error!(
 						self.logger,
@@ -502,7 +494,7 @@ where
 	/// Read a channel monitor.
 	fn read_monitor(
 		&self, monitor_name: &MonitorName,
-	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), io::Error> {
+	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error> {
 		let outpoint: OutPoint = monitor_name.try_into()?;
 		let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
 			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -513,7 +505,7 @@ where
 		if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
 			monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
 		}
-		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
 			&mut monitor_cursor,
 			(&*self.entropy_source, &*self.signer_provider),
 		) {
@@ -602,7 +594,7 @@ where
 	}
 }
-impl
+impl
 	Persist for MonitorUpdatingPersister
 where
 	K::Target: KVStore,
@@ -788,7 +780,7 @@ impl MonitorName {
 	fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
 		let mut parts = name.splitn(2, '_');
 		let txid = if let Some(part) = parts.next() {
-			Txid::from_hex(part).map_err(|_| {
+			Txid::from_str(part).map_err(|_| {
 				io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
 			})?
 		} else {
@@ -821,7 +813,7 @@ impl TryFrom<&MonitorName> for OutPoint {
 impl From<OutPoint> for MonitorName {
 	fn from(value: OutPoint) -> Self {
-		MonitorName(format!("{}_{}", value.txid.to_hex(), value.index))
+		MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
 	}
 }
@@ -881,13 +873,13 @@ mod tests {
 	#[test]
 	fn monitor_from_outpoint_works() {
 		let monitor_name1 = MonitorName::from(OutPoint {
-			txid: Txid::from_hex("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
+			txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
 			index: 1,
 		});
 		assert_eq!(monitor_name1.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
 		let monitor_name2 = MonitorName::from(OutPoint {
-			txid: Txid::from_hex("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
+			txid: Txid::from_str("f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef").unwrap(),
 			index: u16::MAX,
 		});
 		assert_eq!(monitor_name2.as_str(), "f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535");
@@ -949,17 +941,17 @@ mod tests {
 		// Check that the persisted channel data is empty before any channels are
 		// open.
 		let mut persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
-			broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+			&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 		assert_eq!(persisted_chan_data_0.len(), 0);
 		let mut persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
-			broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+			&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
 		assert_eq!(persisted_chan_data_1.len(), 0);
 		// Helper to make sure the channel is on the expected update ID.
 		macro_rules! check_persisted_data {
 			($expected_update_id: expr) => {
 				persisted_chan_data_0 = persister_0.read_all_channel_monitors_with_updates(
-					broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+					&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 				// check that we stored only one monitor
 				assert_eq!(persisted_chan_data_0.len(), 1);
 				for (_, mon) in persisted_chan_data_0.iter() {
@@ -978,7 +970,7 @@ mod tests {
 					}
 				}
 				persisted_chan_data_1 = persister_1.read_all_channel_monitors_with_updates(
-					broadcaster_1, &chanmon_cfgs[1].fee_estimator).unwrap();
+					&broadcaster_1, &&chanmon_cfgs[1].fee_estimator).unwrap();
 				assert_eq!(persisted_chan_data_1.len(), 1);
 				for (_, mon) in persisted_chan_data_1.iter() {
 					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
@@ -1043,7 +1035,7 @@ mod tests {
 		check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
 		// Make sure the expected number of stale updates is present.
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 		let (_, monitor) = &persisted_chan_data[0];
 		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
 		// The channel should have 0 updates, as it wrote a full monitor and consolidated.
@@ -1070,7 +1062,7 @@ mod tests {
 		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
 		let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
 		let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
-		let test_txo = OutPoint { txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
+		let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
 		let ro_persister = MonitorUpdatingPersister {
 			kv_store: &TestStore::new(true),
@@ -1151,7 +1143,7 @@ mod tests {
 		// Check that the persisted channel data is empty before any channels are
 		// open.
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 		assert_eq!(persisted_chan_data.len(), 0);
 		// Create some initial channel
@@ -1162,7 +1154,7 @@ mod tests {
 		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
 		// Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
-		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(broadcaster_0, &chanmon_cfgs[0].fee_estimator).unwrap();
+		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates(&broadcaster_0, &&chanmon_cfgs[0].fee_estimator).unwrap();
 		let (_, monitor) = &persisted_chan_data[0];
 		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
 		persister_0