X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Futil%2Fpersist.rs;h=95c68b589fa2c0d3e5694236d1889e99e709d330;hb=65efd92c4a931b3b3622c84c6055bc015152fde0;hp=3f918935f16700008cae3b2854babeae60bbd740;hpb=4b5504366b2e0c7e29e8bf4bee5d05445f363c35;p=rust-lightning

diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 3f918935..95c68b58 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -20,8 +20,8 @@ use crate::prelude::*;
 
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
-use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
+use crate::chain::chainmonitor::Persist;
+use crate::sign::{EntropySource, ecdsa::EcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
 use crate::ln::channelmanager::AChannelManager;
@@ -75,6 +75,19 @@ pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 /// The key under which the [`WriteableScore`] will be persisted.
 pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
 
+/// The primary namespace under which [`OutputSweeper`] state will be persisted.
+///
+/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
+pub const OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
+/// The secondary namespace under which [`OutputSweeper`] state will be persisted.
+///
+/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
+pub const OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
+/// The key under which [`OutputSweeper`] state will be persisted.
+///
+/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
+pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper";
+
 /// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
 ///
 /// This serves to prevent someone from accidentally loading such monitors (which may need
@@ -188,13 +202,13 @@ where
 	}
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
+impl<ChannelSigner: EcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
 	// TODO: We really need a way for the persister to inform the user that its time to crash/shut
 	// down once these start returning failure.
 	// Then we should return InProgress rather than UnrecoverableError, implying we should probably
 	// just shut down the node since we're not retrying persistence!
 
-	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
 		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
 		match self.write(
 			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -206,7 +220,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
 		}
 	}
 
-	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
 		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
 		match self.write(
 			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -623,7 +637,7 @@ where
 	}
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
+impl<ChannelSigner: EcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
 	Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
 where
 	K::Target: KVStore,
@@ -634,8 +648,7 @@ where
 	/// Persists a new channel. This means writing the entire monitor to the
 	/// parametrized [`KVStore`].
 	fn persist_new_channel(
-		&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
-		_monitor_update_call_id: MonitorUpdateId,
+		&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>
 	) -> chain::ChannelMonitorUpdateStatus {
 		// Determine the proper key for this monitor
 		let monitor_name = MonitorName::from(funding_txo);
@@ -679,10 +692,8 @@ where
 	/// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
 	fn update_persisted_channel(
 		&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
-		monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
+		monitor: &ChannelMonitor<ChannelSigner>
 	) -> chain::ChannelMonitorUpdateStatus {
-		// IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
-		// ChannelMonitorUpdate's update_id.
 		if let Some(update) = update {
 			if update.update_id != CLOSED_CHANNEL_UPDATE_ID &&
 				update.update_id % self.maximum_pending_updates != 0
@@ -718,7 +729,7 @@ where
 		};
 
 		// We could write this update, but it meets criteria of our design that calls for a full monitor write.
-		let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
+		let monitor_update_status = self.persist_new_channel(funding_txo, monitor);
 
 		if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
 			let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
@@ -747,7 +758,7 @@ where
 			}
 		} else {
 			// There is no update given, so we must persist a new monitor.
-			self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+			self.persist_new_channel(funding_txo, monitor)
 		}
 	}
 
@@ -1061,7 +1072,9 @@ mod tests {
 
 		// Force close because cooperative close doesn't result in any persisted
 		// updates.
-		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+
+		let error_message = "Channel force-closed";
+		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
 		check_closed_broadcast!(nodes[0], true);
 
@@ -1099,12 +1112,11 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+		let error_message = "Channel force-closed";
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
 		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
 		{
 			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-			let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
 			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
 			let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
 			let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
@@ -1116,7 +1128,7 @@ mod tests {
 				entropy_source: node_cfgs[0].keys_manager,
 				signer_provider: node_cfgs[0].keys_manager,
 			};
-			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1) {
 				ChannelMonitorUpdateStatus::UnrecoverableError => {
 					// correct result
 				}
@@ -1127,7 +1139,7 @@ mod tests {
 					panic!("Returned InProgress when shouldn't have")
 				}
 			}
-			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
+			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1) {
 				ChannelMonitorUpdateStatus::UnrecoverableError => {
 					// correct result
 				}
@@ -1217,7 +1229,8 @@ mod tests {
 			.is_err());
 
 		// Force close.
-		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+		let error_message = "Channel force-closed";
+		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
@@ -1238,7 +1251,7 @@ mod tests {
 			.is_err());
 	}
 
-	fn persist_fn<P: Deref, ChannelSigner: WriteableEcdsaChannelSigner>(_persist: P) -> bool where P::Target: Persist<ChannelSigner> {
+	fn persist_fn<P: Deref, ChannelSigner: EcdsaChannelSigner>(_persist: P) -> bool where P::Target: Persist<ChannelSigner> {
 		true
 	}
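
Taken together, the hunks above remove the `MonitorUpdateId` argument from `Persist::persist_new_channel` and `Persist::update_persisted_channel`, move the signer bound from `WriteableEcdsaChannelSigner` to `EcdsaChannelSigner`, add an error-message `String` argument to `ChannelManager::force_close_broadcasting_latest_txn`, and introduce dedicated persistence constants for [`OutputSweeper`] state. The sketch below illustrates only the last point: where those constants place the sweeper state inside any [`KVStore`]. It is not part of the diff; it assumes `KVStore::read` keeps the `(primary_namespace, secondary_namespace, key)` shape this file already uses for monitors, and the helper name is hypothetical.

	use lightning::util::persist::{
		KVStore, OUTPUT_SWEEPER_PERSISTENCE_KEY, OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE,
		OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE,
	};

	/// Hypothetical helper (not part of LDK): fetch the raw serialized `OutputSweeper`
	/// state, e.g. for a backup or debugging tool. Returns `None` if nothing has been
	/// persisted under the sweeper key yet.
	fn read_output_sweeper_bytes<K: KVStore + ?Sized>(store: &K) -> Option<Vec<u8>> {
		store.read(
			OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE,
			OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE,
			OUTPUT_SWEEPER_PERSISTENCE_KEY,
		).ok()
	}

Any `KVStore` implementation can be passed here by reference, for example the filesystem-backed store shipped in the `lightning-persister` crate.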