X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Futil%2Fpersist.rs;h=95c68b589fa2c0d3e5694236d1889e99e709d330;hb=65efd92c4a931b3b3622c84c6055bc015152fde0;hp=23e41e4b564b879ee9c36e36389838a76c1e57f4;hpb=fd237101fd3c0ba9fa5838f7463f49cede84f124;p=rust-lightning

diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 23e41e4b..95c68b58 100644
--- a/lightning/src/util/persist.rs
+++ b/lightning/src/util/persist.rs
@@ -20,8 +20,8 @@ use crate::prelude::*;
 
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
-use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
+use crate::chain::chainmonitor::Persist;
+use crate::sign::{EntropySource, ecdsa::EcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
 use crate::ln::channelmanager::AChannelManager;
@@ -56,6 +56,11 @@ pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
 /// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
 pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
 
+/// The primary namespace under which archived [`ChannelMonitor`]s will be persisted.
+pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "archived_monitors";
+/// The secondary namespace under which archived [`ChannelMonitor`]s will be persisted.
+pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
+
 /// The primary namespace under which the [`NetworkGraph`] will be persisted.
 pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
 /// The secondary namespace under which the [`NetworkGraph`] will be persisted.
@@ -197,13 +202,13 @@ where
 	}
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
+impl<ChannelSigner: EcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
 	// TODO: We really need a way for the persister to inform the user that its time to crash/shut
 	// down once these start returning failure.
 	// Then we should return InProgress rather than UnrecoverableError, implying we should probably
 	// just shut down the node since we're not retrying persistence!
 
-	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
 		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
 		match self.write(
 			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -215,7 +220,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
 		}
 	}
 
-	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
 		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
 		match self.write(
 			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -226,6 +231,33 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
 			Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError
 		}
 	}
+
+	fn archive_persisted_channel(&self, funding_txo: OutPoint) {
+		let monitor_name = MonitorName::from(funding_txo);
+		let monitor = match self.read(
+			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+			monitor_name.as_str(),
+		) {
+			Ok(monitor) => monitor,
+			Err(_) => return
+		};
+		match self.write(
+			ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+			ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+			monitor_name.as_str(),
+			&monitor,
+		) {
+			Ok(()) => {}
+			Err(_e) => return
+		};
+		let _ = self.remove(
+			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+			monitor_name.as_str(),
+			true,
+		);
+	}
 }
 
 /// Read previously persisted [`ChannelMonitor`]s from the store.
@@ -605,7 +637,7 @@ where
 	}
 }
 
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
+impl<ChannelSigner: EcdsaChannelSigner, K: Deref, L: Deref, ES: Deref, SP: Deref>
 	Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP>
 where
 	K::Target: KVStore,
@@ -616,8 +648,7 @@ where
 	/// Persists a new channel. This means writing the entire monitor to the
 	/// parametrized [`KVStore`].
 	fn persist_new_channel(
-		&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
-		_monitor_update_call_id: MonitorUpdateId,
+		&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>
 	) -> chain::ChannelMonitorUpdateStatus {
 		// Determine the proper key for this monitor
 		let monitor_name = MonitorName::from(funding_txo);
@@ -661,10 +692,8 @@ where
 	/// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
 	fn update_persisted_channel(
 		&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
-		monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
+		monitor: &ChannelMonitor<ChannelSigner>
 	) -> chain::ChannelMonitorUpdateStatus {
-		// IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
-		// ChannelMonitorUpdate's update_id.
 		if let Some(update) = update {
 			if update.update_id != CLOSED_CHANNEL_UPDATE_ID
 				&& update.update_id % self.maximum_pending_updates != 0
@@ -700,7 +729,7 @@ where
 			};
 
 			// We could write this update, but it meets criteria of our design that calls for a full monitor write.
-			let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
+			let monitor_update_status = self.persist_new_channel(funding_txo, monitor);
 
 			if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
 				let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
@@ -729,9 +758,32 @@ where
 			}
 		} else {
 			// There is no update given, so we must persist a new monitor.
-			self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+			self.persist_new_channel(funding_txo, monitor)
 		}
 	}
+
+	fn archive_persisted_channel(&self, funding_txo: OutPoint) {
+		let monitor_name = MonitorName::from(funding_txo);
+		let monitor = match self.read_monitor(&monitor_name) {
+			Ok((_block_hash, monitor)) => monitor,
+			Err(_) => return
+		};
+		match self.kv_store.write(
+			ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+			ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+			monitor_name.as_str(),
+			&monitor.encode()
+		) {
+			Ok(()) => {},
+			Err(_e) => return,
+		};
+		let _ = self.kv_store.remove(
+			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
+			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
+			monitor_name.as_str(),
+			true,
+		);
+	}
 }
 
 impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
@@ -1020,7 +1072,9 @@ mod tests {
 
 		// Force close because cooperative close doesn't result in any persisted
 		// updates.
-		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+
+		let error_message = "Channel force-closed";
+		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
@@ -1058,12 +1112,11 @@ mod tests {
 		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+		let error_message = "Channel force-closed";
+		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
 		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
 		{
 			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-			let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
 			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
 			let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
 			let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
@@ -1075,7 +1128,7 @@ mod tests {
 				entropy_source: node_cfgs[0].keys_manager,
 				signer_provider: node_cfgs[0].keys_manager,
 			};
-			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1) {
 				ChannelMonitorUpdateStatus::UnrecoverableError => {
 					// correct result
 				}
@@ -1086,7 +1139,7 @@ mod tests {
 					panic!("Returned InProgress when shouldn't have")
 				}
 			}
-			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
+			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1) {
 				ChannelMonitorUpdateStatus::UnrecoverableError => {
 					// correct result
 				}
@@ -1176,7 +1229,8 @@ mod tests {
 			.is_err());
 
 		// Force close.
-		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+		let error_message = "Channel force-closed";
+		nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
 		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
 		check_closed_broadcast!(nodes[0], true);
 		check_added_monitors!(nodes[0], 1);
@@ -1197,7 +1251,7 @@ mod tests {
 			.is_err());
 	}
 
-	fn persist_fn<P: Deref, ChannelSigner: WriteableEcdsaChannelSigner>(_persist: P) -> bool where P::Target: Persist<ChannelSigner> {
+	fn persist_fn<P: Deref, ChannelSigner: EcdsaChannelSigner>(_persist: P) -> bool where P::Target: Persist<ChannelSigner> {
		true
	}
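
For context on the new `archive_persisted_channel` method: both implementations above use the same fail-safe order of operations. They read the monitor out of the live "monitors" namespace, copy it under "archived_monitors", and only then remove the live entry, so a failure at any step leaves the original monitor in place. The sketch below shows that flow written directly against the `KVStore` interface and the namespace constants this patch introduces. It is illustrative only, not part of the diff: the free-standing helper `archive_monitor_bytes` and its `key` parameter (the "<funding_txid>_<index>" monitor name used throughout this file) are assumptions made for the example.

use lightning::util::persist::{
	KVStore,
	ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
	ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
	CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
	CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
};

// Hypothetical helper mirroring the archive flow this patch adds to the
// blanket `Persist` impl for `KVStore`.
fn archive_monitor_bytes<K: KVStore + ?Sized>(store: &K, key: &str) {
	// 1) Read the serialized monitor from the live namespace; if it cannot
	//    be read there is nothing to archive, so bail out without side
	//    effects.
	let monitor_bytes = match store.read(
		CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
		CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
		key,
	) {
		Ok(bytes) => bytes,
		Err(_) => return,
	};
	// 2) Write the copy under "archived_monitors" before touching the live
	//    entry, so a crash between the two steps cannot lose the monitor.
	if store.write(
		ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
		ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
		key,
		&monitor_bytes,
	).is_err() {
		return;
	}
	// 3) Remove the live entry; `true` requests lazy removal, letting the
	//    store defer the actual deletion. An error here is tolerable: at
	//    worst the monitor temporarily exists in both namespaces.
	let _ = store.remove(
		CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
		CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
		key,
		true,
	);
}

In this release the trait method is intended to be driven by the `ChainMonitor` (via its new `archive_fully_resolved_channel_monitors` entry point), so a `KVStore` implementation picks up the archiving behavior through the blanket `Persist` impl updated above without any further code.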