-use lightning::chain::channelmonitor;
+use lightning::chain::{chainmonitor, channelmonitor};
use lightning::chain::transaction::OutPoint;
use lightning::util::enforcing_trait_impls::EnforcingSigner;
pub struct TestPersister {}
-impl channelmonitor::Persist<EnforcingSigner> for TestPersister {
+impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
Ok(())
}
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use lightning::chain::chainmonitor::ChainMonitor;
-use lightning::chain::channelmonitor;
+use lightning::chain::chainmonitor::{ChainMonitor, Persist};
use lightning::chain::keysinterface::{Sign, KeysInterface};
use lightning::ln::channelmanager::ChannelManager;
use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
K::Target: 'static + KeysInterface<Signer = Signer>,
F::Target: 'static + FeeEstimator,
L::Target: 'static + Logger,
- P::Target: 'static + channelmonitor::Persist<Signer>,
+ P::Target: 'static + Persist<Signer>,
CMH::Target: 'static + ChannelMessageHandler,
RMH::Target: 'static + RoutingMessageHandler,
UMH::Target: 'static + CustomMessageHandler,
///
/// use lightning::chain;
/// use lightning::chain::Watch;
+/// use lightning::chain::chainmonitor;
/// use lightning::chain::chainmonitor::ChainMonitor;
-/// use lightning::chain::channelmonitor;
/// use lightning::chain::channelmonitor::ChannelMonitor;
/// use lightning::chain::chaininterface::BroadcasterInterface;
/// use lightning::chain::chaininterface::FeeEstimator;
/// F: FeeEstimator,
/// L: Logger,
/// C: chain::Filter,
-/// P: channelmonitor::Persist<S>,
+/// P: chainmonitor::Persist<S>,
/// >(
/// block_source: &mut B,
/// chain_monitor: &ChainMonitor<S, &C, &T, &F, &L, &P>,
//! type Logger = dyn lightning::util::logger::Logger + Send + Sync;
//! type ChainAccess = dyn lightning::chain::Access + Send + Sync;
//! type ChainFilter = dyn lightning::chain::Filter + Send + Sync;
-//! type DataPersister = dyn lightning::chain::channelmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
+//! type DataPersister = dyn lightning::chain::chainmonitor::Persist<lightning::chain::keysinterface::InMemorySigner> + Send + Sync;
//! type ChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::chain::keysinterface::InMemorySigner, Arc<ChainFilter>, Arc<TxBroadcaster>, Arc<FeeEstimator>, Arc<Logger>, Arc<DataPersister>>;
//! type ChannelManager = Arc<lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor, TxBroadcaster, FeeEstimator, Logger>>;
//! type PeerManager = Arc<lightning::ln::peer_handler::SimpleArcPeerManager<lightning_net_tokio::SocketDescriptor, ChainMonitor, TxBroadcaster, FeeEstimator, ChainAccess, Logger>>;
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr};
-use lightning::chain::channelmonitor;
+use lightning::chain::chainmonitor;
use lightning::chain::keysinterface::{Sign, KeysInterface};
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmanager::ChannelManager;
}
}
-impl<ChannelSigner: Sign> channelmonitor::Persist<ChannelSigner> for FilesystemPersister {
+impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for FilesystemPersister {
fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
util::write_to_file(self.path_to_monitor_data(), filename, monitor)
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::hashes::hex::FromHex;
use bitcoin::Txid;
- use lightning::chain::channelmonitor::{Persist, ChannelMonitorUpdateErr};
+ use lightning::chain::chainmonitor::Persist;
+ use lightning::chain::channelmonitor::ChannelMonitorUpdateErr;
use lightning::chain::transaction::OutPoint;
use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
use lightning::ln::features::InitFeatures;
use chain;
use chain::{Filter, WatchedOutput};
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use chain::channelmonitor;
-use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, Balance, MonitorEvent, Persist, TransactionOutputs};
+use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, Balance, MonitorEvent, TransactionOutputs};
use chain::transaction::{OutPoint, TransactionData};
use chain::keysinterface::Sign;
use util::logger::Logger;
use sync::RwLock;
use core::ops::Deref;
+/// `Persist` defines behavior for persisting channel monitors: this could mean
+/// writing once to disk, and/or uploading to one or more backup services.
+///
+/// Note that for every new monitor, you **must** persist the new `ChannelMonitor`
+/// to disk/backups. And, on every update, you **must** persist either the
+/// `ChannelMonitorUpdate` or the updated monitor itself. Otherwise, you risk
+/// situations such as revoking a transaction, crashing before the revocation
+/// can be persisted, and then unintentionally broadcasting a revoked
+/// transaction, losing money. Previous channel states are toxic, so whatever
+/// channel state is persisted must be kept up-to-date.
+pub trait Persist<ChannelSigner: Sign> {
+ /// Persist a new channel's data. The data can be stored any way you want, but
+ /// the identifier provided by Rust-Lightning is the channel's outpoint (and
+ /// it is up to you to maintain a correct mapping between the outpoint and the
+ /// stored channel data). Note that you **must** persist every new monitor to
+ /// disk/backups. See the `Persist` trait documentation for more details.
+ ///
+ /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
+ /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+ ///
+ /// [`Writeable::write`]: crate::util::ser::Writeable::write
+ fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+
+ /// Update one channel's data. The provided `ChannelMonitor` has already
+ /// applied the given update.
+ ///
+ /// Note that on every update, you **must** persist either the
+ /// `ChannelMonitorUpdate` or the updated monitor itself to disk/backups. See
+ /// the `Persist` trait documentation for more details.
+ ///
+ /// If an implementer chooses to persist the updates only, they need to make
+ /// sure that all the updates are applied to the `ChannelMonitors` *before*
+ /// the set of channel monitors is given to the `ChannelManager`
+ /// deserialization routine. See [`ChannelMonitor::update_monitor`] for
+ /// applying a monitor update to a monitor. If full `ChannelMonitors` are
+ /// persisted, then there is no need to persist individual updates.
+ ///
+ /// Note that there could be a performance tradeoff between persisting complete
+ /// channel monitors on every update vs. persisting only updates and applying
+ /// them in batches. The size of each monitor grows as `O(number of state updates)`
+ /// whereas updates are small and `O(1)`.
+ ///
+ /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
+ /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
+ /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
+ ///
+ /// [`Writeable::write`]: crate::util::ser::Writeable::write
+ fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
+}
+
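A minimal sketch (not part of the patch itself) of how the relocated `chainmonitor::Persist` trait can be implemented. `MemoryPersister` and its two maps are hypothetical names chosen for illustration, and the sketch assumes the `Writeable::encode` helper from `lightning::util::ser` for serialization: it writes the full monitor for every new channel and appends only the serialized `ChannelMonitorUpdate` afterwards. As the documentation above notes, anyone persisting updates this way must re-apply them to the monitors before handing the set to `ChannelManager` deserialization.

use std::collections::HashMap;
use std::sync::Mutex;

use lightning::chain::chainmonitor::Persist;
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;
use lightning::util::ser::Writeable;

// Hypothetical illustration only; not part of this changeset.
#[derive(Default)]
pub struct MemoryPersister {
	// Full serialized `ChannelMonitor`s, keyed by the channel's funding outpoint.
	monitors: Mutex<HashMap<OutPoint, Vec<u8>>>,
	// Serialized `ChannelMonitorUpdate`s appended per channel since the full write.
	update_logs: Mutex<HashMap<OutPoint, Vec<Vec<u8>>>>,
}

impl<ChannelSigner: Sign> Persist<ChannelSigner> for MemoryPersister {
	fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
		// Every new channel's monitor must be persisted in full.
		self.monitors.lock().unwrap().insert(id, data.encode());
		Ok(())
	}

	fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, _data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr> {
		// Persist only the small O(1) update; `_data` already has it applied, so
		// writing the full monitor here instead would be equally valid.
		self.update_logs.lock().unwrap().entry(id).or_insert_with(Vec::new).push(update.encode());
		Ok(())
	}
}

Persisting the full, already-updated monitor in `update_persisted_channel` would also satisfy the trait and removes the need for an update log, at the cost of writes that grow with the number of state updates.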
/// An implementation of [`chain::Watch`] for monitoring channels.
///
/// Connected and disconnected blocks must be provided to `ChainMonitor` as documented by
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- P::Target: channelmonitor::Persist<ChannelSigner>,
+ P::Target: Persist<ChannelSigner>,
{
/// The monitors
pub monitors: RwLock<HashMap<OutPoint, ChannelMonitor<ChannelSigner>>>,
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- P::Target: channelmonitor::Persist<ChannelSigner>,
+ P::Target: Persist<ChannelSigner>,
{
/// Dispatches to per-channel monitors, which are responsible for updating their on-chain view
/// of a channel and reacting accordingly based on transactions in the given chain data. See
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- P::Target: channelmonitor::Persist<ChannelSigner>,
+ P::Target: Persist<ChannelSigner>,
{
fn block_connected(&self, block: &Block, height: u32) {
let header = &block.header;
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- P::Target: channelmonitor::Persist<ChannelSigner>,
+ P::Target: Persist<ChannelSigner>,
{
fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
log_debug!(self.logger, "{} provided transactions confirmed at height {} in block {}", txdata.len(), height, header.block_hash());
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- P::Target: channelmonitor::Persist<ChannelSigner>,
+ P::Target: Persist<ChannelSigner>,
{
/// Adds the monitor that watches the channel referred to by the given outpoint.
///
T::Target: BroadcasterInterface,
F::Target: FeeEstimator,
L::Target: Logger,
- P::Target: channelmonitor::Persist<ChannelSigner>,
+ P::Target: Persist<ChannelSigner>,
{
/// Processes [`SpendableOutputs`] events produced from each [`ChannelMonitor`] upon maturity.
///
}
}
-/// `Persist` defines behavior for persisting channel monitors: this could mean
-/// writing once to disk, and/or uploading to one or more backup services.
-///
-/// Note that for every new monitor, you **must** persist the new `ChannelMonitor`
-/// to disk/backups. And, on every update, you **must** persist either the
-/// `ChannelMonitorUpdate` or the updated monitor itself. Otherwise, there is risk
-/// of situations such as revoking a transaction, then crashing before this
-/// revocation can be persisted, then unintentionally broadcasting a revoked
-/// transaction and losing money. This is a risk because previous channel states
-/// are toxic, so it's important that whatever channel state is persisted is
-/// kept up-to-date.
-pub trait Persist<ChannelSigner: Sign> {
- /// Persist a new channel's data. The data can be stored any way you want, but
- /// the identifier provided by Rust-Lightning is the channel's outpoint (and
- /// it is up to you to maintain a correct mapping between the outpoint and the
- /// stored channel data). Note that you **must** persist every new monitor to
- /// disk. See the `Persist` trait documentation for more details.
- ///
- /// See [`ChannelMonitor::write`] for writing out a `ChannelMonitor`,
- /// and [`ChannelMonitorUpdateErr`] for requirements when returning errors.
- fn persist_new_channel(&self, id: OutPoint, data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
-
- /// Update one channel's data. The provided `ChannelMonitor` has already
- /// applied the given update.
- ///
- /// Note that on every update, you **must** persist either the
- /// `ChannelMonitorUpdate` or the updated monitor itself to disk/backups. See
- /// the `Persist` trait documentation for more details.
- ///
- /// If an implementer chooses to persist the updates only, they need to make
- /// sure that all the updates are applied to the `ChannelMonitors` *before*
- /// the set of channel monitors is given to the `ChannelManager`
- /// deserialization routine. See [`ChannelMonitor::update_monitor`] for
- /// applying a monitor update to a monitor. If full `ChannelMonitors` are
- /// persisted, then there is no need to persist individual updates.
- ///
- /// Note that there could be a performance tradeoff between persisting complete
- /// channel monitors on every update vs. persisting only updates and applying
- /// them in batches. The size of each monitor grows `O(number of state updates)`
- /// whereas updates are small and `O(1)`.
- ///
- /// See [`ChannelMonitor::write`] for writing out a `ChannelMonitor`,
- /// [`ChannelMonitorUpdate::write`] for writing out an update, and
- /// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
- fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor<ChannelSigner>) -> Result<(), ChannelMonitorUpdateErr>;
-}
-
impl<Signer: Sign, T: Deref, F: Deref, L: Deref> chain::Listen for (ChannelMonitor<Signer>, T, F, L)
where
T::Target: BroadcasterInterface,
#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
pub mod bench {
use chain::Listen;
- use chain::chainmonitor::ChainMonitor;
- use chain::channelmonitor::Persist;
+ use chain::chainmonitor::{ChainMonitor, Persist};
use chain::keysinterface::{KeysManager, InMemorySigner};
use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
use ln::features::{InitFeatures, InvoiceFeatures};
pub struct TestChainMonitor<'a> {
pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<EnforcingSigner>)>>,
pub latest_monitor_update_id: Mutex<HashMap<[u8; 32], (OutPoint, u64)>>,
- pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a channelmonitor::Persist<EnforcingSigner>>,
+ pub chain_monitor: chainmonitor::ChainMonitor<EnforcingSigner, &'a TestChainSource, &'a chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a chainmonitor::Persist<EnforcingSigner>>,
pub keys_manager: &'a TestKeysInterface,
pub update_ret: Mutex<Option<Result<(), channelmonitor::ChannelMonitorUpdateErr>>>,
/// If this is set to Some(), after the next return, we'll always return this until update_ret
pub expect_channel_force_closed: Mutex<Option<([u8; 32], bool)>>,
}
impl<'a> TestChainMonitor<'a> {
- pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a channelmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
+ pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a chainmonitor::Persist<EnforcingSigner>, keys_manager: &'a TestKeysInterface) -> Self {
Self {
added_monitors: Mutex::new(Vec::new()),
latest_monitor_update_id: Mutex::new(HashMap::new()),
*self.update_ret.lock().unwrap() = ret;
}
}
-impl<Signer: keysinterface::Sign> channelmonitor::Persist<Signer> for TestPersister {
+impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersister {
fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
self.update_ret.lock().unwrap().clone()
}