use core::sync::atomic::{AtomicUsize, Ordering};
use bitcoin::secp256k1::PublicKey;
-mod update_origin {
- #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
- /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
- /// entirely opaque.
- pub(crate) enum UpdateOrigin {
- /// An update that was generated by the `ChannelManager` (via our [`crate::chain::Watch`]
- /// implementation). This corresponds to an actual [ChannelMonitorUpdate::update_id] field
- /// and [ChannelMonitor::get_latest_update_id].
- ///
- /// [ChannelMonitor::get_latest_update_id]: crate::chain::channelmonitor::ChannelMonitor::get_latest_update_id
- /// [ChannelMonitorUpdate::update_id]: crate::chain::channelmonitor::ChannelMonitorUpdate::update_id
- OffChain(u64),
- /// An update that was generated during blockchain processing. The ID here is specific to the
- /// generating [ChannelMonitor] and does *not* correspond to any on-disk IDs.
- ///
- /// [ChannelMonitor]: crate::chain::channelmonitor::ChannelMonitor
- ChainSync(u64),
- }
-}
-
-#[cfg(any(feature = "_test_utils", test))]
-pub(crate) use update_origin::UpdateOrigin;
-#[cfg(not(any(feature = "_test_utils", test)))]
-use update_origin::UpdateOrigin;
-
-/// An opaque identifier describing a specific [`Persist`] method call.
-#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
-pub struct MonitorUpdateId {
- pub(crate) contents: UpdateOrigin,
-}
-
-impl MonitorUpdateId {
- pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self {
- Self { contents: UpdateOrigin::OffChain(update.update_id) }
- }
- pub(crate) fn from_new_monitor<ChannelSigner: WriteableEcdsaChannelSigner>(monitor: &ChannelMonitor<ChannelSigner>) -> Self {
- Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) }
- }
-}
-
/// `Persist` defines behavior for persisting channel monitors: this could mean
/// writing once to disk, and/or uploading to one or more backup services.
///
/// All calls should generally spawn a background task and immediately return
/// [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
/// [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
-/// [`MonitorUpdateId`].
+/// [`ChannelMonitor::get_latest_update_id`] or [`ChannelMonitorUpdate::update_id`].
///
/// Note that unlike the direct [`chain::Watch`] interface,
/// [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
/// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
/// and the stored channel data). Note that you **must** persist every new monitor to disk.
///
- /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
- /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
+ /// [`ChannelMonitor::get_latest_update_id`] uniquely links this call to
+ /// [`ChainMonitor::channel_monitor_updated`]; pass it there as the `completed_update_id`.
+ /// For [`Persist::persist_new_channel`], calling [`ChainMonitor::channel_monitor_updated`]
+ /// is only necessary if you return [`ChannelMonitorUpdateStatus::InProgress`].
///
/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
/// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
/// update.
/// them in batches. The size of each monitor grows `O(number of state updates)`
/// whereas updates are small and `O(1)`.
///
- /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
- /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
+ /// The [`ChannelMonitorUpdate::update_id`] or [`ChannelMonitor::get_latest_update_id`] uniquely
+ /// links this call to [`ChainMonitor::channel_monitor_updated`].
+ /// For [`Persist::update_persisted_channel`], calling [`ChainMonitor::channel_monitor_updated`]
+ /// is only necessary when a [`ChannelMonitorUpdate`] is provided and you return
+ /// [`ChannelMonitorUpdateStatus::InProgress`].
///
/// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
/// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
///
/// [`Writeable::write`]: crate::util::ser::Writeable::write
- fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+ fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
/// Prevents the channel monitor from being loaded on startup.
///
/// Archiving the data in a backup location (rather than deleting it fully) is useful for
/// update_persisted_channel, the user returns a
/// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
/// immediately, racing our insertion of the pending update into the contained Vec.
- pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
+ pending_monitor_updates: Mutex<Vec<u64>>,
}
impl<ChannelSigner: WriteableEcdsaChannelSigner> MonitorHolder<ChannelSigner> {
- fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
- pending_monitor_updates_lock.iter().any(|update_id|
- if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false })
+ fn has_pending_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<u64>>) -> bool {
+ !pending_monitor_updates_lock.is_empty()
}
}
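// A minimal sketch of the matching bookkeeping a `Persist` implementation might keep on its own
// side, using only the standard library: in-flight writes are tracked purely as `u64` update IDs
// and a channel is considered clean once its pending set is empty. `PendingWrites`, `start` and
// `finish` are hypothetical names for illustration, not LDK API.
use std::collections::HashMap;
use std::sync::Mutex;

struct PendingWrites {
	// Keyed by the channel's funding outpoint, simplified here to (txid bytes, output index).
	in_flight: Mutex<HashMap<([u8; 32], u16), Vec<u64>>>,
}

impl PendingWrites {
	fn start(&self, channel: ([u8; 32], u16), update_id: u64) {
		self.in_flight.lock().unwrap().entry(channel).or_default().push(update_id);
	}
	// Returns true once no writes remain in flight, i.e. the point at which it is safe to
	// report the channel as fully persisted (mirroring `has_pending_updates` above).
	fn finish(&self, channel: ([u8; 32], u16), update_id: u64) -> bool {
		let mut map = self.in_flight.lock().unwrap();
		let pending = map.entry(channel).or_default();
		pending.retain(|id| *id != update_id);
		pending.is_empty()
	}
}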
P::Target: Persist<ChannelSigner>,
{
monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
- /// When we generate a [`MonitorUpdateId`] for a chain-event monitor persistence, we need a
+ /// When we generate a monitor update for a chain-event monitor persistence, we need a
/// unique ID, which we calculate by simply getting the next value from this counter. Note that
/// the ID is never persisted so it's ok that they reset on restart.
sync_persistence_id: AtomicCounter,
let mut txn_outputs;
{
txn_outputs = process(monitor, txdata);
- let chain_sync_update_id = self.sync_persistence_id.get_increment();
- let update_id = MonitorUpdateId {
- contents: UpdateOrigin::ChainSync(chain_sync_update_id),
- };
-
- log_trace!(logger, "Syncing Channel Monitor for channel {} for block-data update_id {}",
- log_funding_info!(monitor),
- chain_sync_update_id
- );
- match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
+ log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+ match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) {
ChannelMonitorUpdateStatus::Completed =>
- log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data update_id {}",
- log_funding_info!(monitor),
- chain_sync_update_id
+ log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data",
+ log_funding_info!(monitor)
),
ChannelMonitorUpdateStatus::InProgress => {
log_debug!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor));
#[cfg(not(c_bindings))]
/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
- pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<MonitorUpdateId>> {
+ /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
+ /// that have not yet been fully persisted. Note that even if a full monitor is persisted, all of the
+ /// pending monitor updates must still be individually marked completed via
+ /// [`ChainMonitor::channel_monitor_updated`].
+ pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<u64>> {
hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
}))
#[cfg(c_bindings)]
/// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
- pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<MonitorUpdateId>)> {
+ /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
+ /// that have not yet been fully persisted. Note that even if a full monitor is persisted, all of the
+ /// pending monitor updates must still be individually marked completed via
+ /// [`ChainMonitor::channel_monitor_updated`].
+ pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<u64>)> {
self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
(*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
}).collect()
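// A small usage sketch of the map returned above (the non-`c_bindings` shape), assuming the
// `lightning` crate is in scope: log which update IDs still await a
// `ChainMonitor::channel_monitor_updated` call. `log_pending_updates` is a hypothetical helper.
use lightning::chain::transaction::OutPoint;
use std::collections::HashMap;

fn log_pending_updates(pending: &HashMap<OutPoint, Vec<u64>>) {
	for (outpoint, update_ids) in pending {
		if !update_ids.is_empty() {
			println!("channel {:?} still has in-flight monitor updates: {:?}", outpoint, update_ids);
		}
	}
}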
/// 1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
/// update to disk and begins updating any remote (e.g. watchtower/backup) copies,
/// returning [`ChannelMonitorUpdateStatus::InProgress`],
- /// 2) once all remote copies are updated, you call this function with the
- /// `completed_update_id` that completed, and once all pending updates have completed the
- /// channel will be re-enabled.
- // Note that we re-enable only after `UpdateOrigin::OffChain` updates complete, we don't
- // care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We
- // only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s.
+ /// 2) once all remote copies are updated, you call this function with [`ChannelMonitor::get_latest_update_id`]
+ /// or [`ChannelMonitorUpdate::update_id`] as the `completed_update_id`, and once all pending
+ /// updates have completed, the channel will be re-enabled.
+ ///
+ /// It is only necessary to call [`ChainMonitor::channel_monitor_updated`] when you return [`ChannelMonitorUpdateStatus::InProgress`]
+ /// from [`Persist`] and either:
+ /// 1. A new [`ChannelMonitor`] was added in [`Persist::persist_new_channel`], or
+ /// 2. A [`ChannelMonitorUpdate`] was provided as part of [`Persist::update_persisted_channel`].
+ ///
+ /// Calls to [`Persist::update_persisted_channel`] where no [`ChannelMonitorUpdate`] was
+ /// provided do not need to be completed via this function.
///
/// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
/// registered [`ChannelMonitor`]s.
- pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
+ pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> {
let monitors = self.monitors.read().unwrap();
let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
- match completed_update_id {
- MonitorUpdateId { contents: UpdateOrigin::OffChain(completed_update_id) } => {
- // Note that we only check for `UpdateOrigin::OffChain` failures here - if
- // we're being told that a `UpdateOrigin::OffChain` monitor update completed,
- // we only care about ensuring we don't tell the `ChannelManager` to restore
- // the channel to normal operation until all `UpdateOrigin::OffChain` updates
- // complete.
- // If there's some `UpdateOrigin::ChainSync` update still pending that's okay
- // - we can still update our channel state, just as long as we don't return
- // `MonitorEvent`s from the monitor back to the `ChannelManager` until they
- // complete.
- let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
- log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
- completed_update_id,
- funding_txo,
- if monitor_is_pending_updates {
- "still have pending off-chain updates"
- } else {
- "all off-chain updates complete, returning a MonitorEvent"
- });
- if monitor_is_pending_updates {
- // If there are still monitor updates pending, we cannot yet construct a
- // Completed event.
- return Ok(());
- }
- let channel_id = monitor_data.monitor.channel_id();
- self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
- funding_txo, channel_id,
- monitor_update_id: monitor_data.monitor.get_latest_update_id(),
- }], monitor_data.monitor.get_counterparty_node_id()));
- },
- MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {},
+ // Note that `pending_monitor_updates` only tracks updates which originated off-chain;
+ // persistences triggered by chain sync are not recorded there, so an empty list means all
+ // off-chain updates have completed.
+ let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates);
+ log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
+ completed_update_id,
+ funding_txo,
+ if monitor_is_pending_updates {
+ "still have pending off-chain updates"
+ } else {
+ "all off-chain updates complete, returning a MonitorEvent"
+ });
+ if monitor_is_pending_updates {
+ // If there are still monitor updates pending, we cannot yet construct a
+ // Completed event.
+ return Ok(());
}
+ let channel_id = monitor_data.monitor.channel_id();
+ self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+ funding_txo, channel_id,
+ monitor_update_id: monitor_data.monitor.get_latest_update_id(),
+ }], monitor_data.monitor.get_counterparty_node_id()));
+
self.event_notifier.notify();
Ok(())
}
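// A sketch of the completion rule described above, assuming the `lightning` crate: once a write
// that returned `InProgress` finishes, these helpers pick the `u64` to hand back to
// `ChainMonitor::channel_monitor_updated`. The helper names are hypothetical.
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::sign::ecdsa::WriteableEcdsaChannelSigner;

// For `Persist::persist_new_channel`: complete with the monitor's latest update ID.
fn completion_id_for_new_channel<S: WriteableEcdsaChannelSigner>(monitor: &ChannelMonitor<S>) -> u64 {
	monitor.get_latest_update_id()
}

// For `Persist::update_persisted_channel`: complete with the update's own ID; calls made with
// `None` return `None` here and, per the `Persist` docs above, require no completion call.
fn completion_id_for_update(update: Option<&ChannelMonitorUpdate>) -> Option<u64> {
	update.map(|update| update.update_id)
}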
hash_map::Entry::Vacant(e) => e,
};
log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
- let update_id = MonitorUpdateId::from_new_monitor(&monitor);
+ let update_id = monitor.get_latest_update_id();
let mut pending_monitor_updates = Vec::new();
- let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
+ let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor);
match persist_res {
ChannelMonitorUpdateStatus::InProgress => {
log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor));
let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
- let update_id = MonitorUpdateId::from_monitor_update(update);
+ let update_id = update.update_id;
let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
let persist_res = if update_res.is_err() {
// Even if updating the monitor returns an error, the monitor's state will
// while reading `channel_monitor` with updates from storage. Instead, we should persist
// the entire `channel_monitor` here.
log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
- self.persister.update_persisted_channel(funding_txo, None, monitor, update_id)
+ self.persister.update_persisted_channel(funding_txo, None, monitor)
} else {
- self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id)
+ self.persister.update_persisted_channel(funding_txo, Some(update), monitor)
};
match persist_res {
ChannelMonitorUpdateStatus::InProgress => {
use crate::chain;
use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
+use crate::chain::chainmonitor::Persist;
use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
use crate::chain::transaction::OutPoint;
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
// Then we should return InProgress rather than UnrecoverableError, implying we should probably
// just shut down the node since we're not retrying persistence!
- fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+ fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
match self.write(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
}
}
- fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+ fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
match self.write(
CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
/// Persists a new channel. This means writing the entire monitor to the
/// parametrized [`KVStore`].
fn persist_new_channel(
- &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
- _monitor_update_call_id: MonitorUpdateId,
+ &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
) -> chain::ChannelMonitorUpdateStatus {
// Determine the proper key for this monitor
let monitor_name = MonitorName::from(funding_txo);
/// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
fn update_persisted_channel(
&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
- monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
+ monitor: &ChannelMonitor<ChannelSigner>,
) -> chain::ChannelMonitorUpdateStatus {
- // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
- // ChannelMonitorUpdate's update_id.
if let Some(update) = update {
if update.update_id != CLOSED_CHANNEL_UPDATE_ID
&& update.update_id % self.maximum_pending_updates != 0
};
// We could write this update, but it meets criteria of our design that calls for a full monitor write.
- let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
+ let monitor_update_status = self.persist_new_channel(funding_txo, monitor);
if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
}
} else {
// There is no update given, so we must persist a new monitor.
- self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+ self.persist_new_channel(funding_txo, monitor)
}
}
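// A pure-Rust sketch of the decision `update_persisted_channel` makes above, with the numbers
// worked out. `CLOSED_CHANNEL_UPDATE_ID` is `u64::MAX` in LDK; with `maximum_pending_updates = 5`,
// update IDs 1 through 4 are written as small differential updates, while IDs 5, 10, 15, ...
// (and channel closure) trigger a full monitor write followed by cleanup of the older update
// entries.
const CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;

fn persists_as_differential_update(update_id: u64, maximum_pending_updates: u64) -> bool {
	update_id != CLOSED_CHANNEL_UPDATE_ID && update_id % maximum_pending_updates != 0
}

#[test]
fn differential_vs_full_write() {
	assert!(persists_as_differential_update(3, 5));  // written as a lone update
	assert!(!persists_as_differential_update(5, 5)); // full monitor write
	assert!(!persists_as_differential_update(CLOSED_CHANNEL_UPDATE_ID, 5)); // closed: full write
}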
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
{
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
- let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
entropy_source: node_cfgs[0].keys_manager,
signer_provider: node_cfgs[0].keys_manager,
};
- match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+ match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1) {
ChannelMonitorUpdateStatus::UnrecoverableError => {
// correct result
}
panic!("Returned InProgress when shouldn't have")
}
}
- match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
+ match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1) {
ChannelMonitorUpdateStatus::UnrecoverableError => {
// correct result
}
#[cfg(test)]
use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW;
use crate::chain::chainmonitor;
-use crate::chain::chainmonitor::{MonitorUpdateId};
use crate::chain::channelmonitor;
use crate::chain::channelmonitor::MonitorEvent;
use crate::chain::transaction::OutPoint;
pub struct TestChainMonitor<'a> {
pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
- pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
+ pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, u64)>>,
pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a dyn chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a dyn chainmonitor::Persist<TestChannelSigner>>,
pub keys_manager: &'a TestKeysInterface,
/// If this is set to Some(), the next update_channel call (not watch_channel) must be a
&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
assert!(new_monitor == monitor);
self.latest_monitor_update_id.lock().unwrap().insert(monitor.channel_id(),
- (funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
+ (funding_txo, monitor.get_latest_update_id(), monitor.get_latest_update_id()));
self.added_monitors.lock().unwrap().push((funding_txo, monitor));
self.chain_monitor.watch_channel(funding_txo, new_monitor)
}
}
self.latest_monitor_update_id.lock().unwrap().insert(channel_id,
- (funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(update)));
+ (funding_txo, update.update_id, update.update_id));
let update_res = self.chain_monitor.update_channel(funding_txo, update);
// At every point where we get a monitor update, we should be able to send a useful monitor
// to a watchtower and disk...
#[cfg(test)]
impl<Signer: sign::ecdsa::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for WatchtowerPersister {
fn persist_new_channel(&self, funding_txo: OutPoint,
- data: &channelmonitor::ChannelMonitor<Signer>, id: MonitorUpdateId
+ data: &channelmonitor::ChannelMonitor<Signer>
) -> chain::ChannelMonitorUpdateStatus {
- let res = self.persister.persist_new_channel(funding_txo, data, id);
+ let res = self.persister.persist_new_channel(funding_txo, data);
assert!(self.unsigned_justice_tx_data.lock().unwrap()
.insert(funding_txo, VecDeque::new()).is_none());
fn update_persisted_channel(
&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>,
- data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId
+ data: &channelmonitor::ChannelMonitor<Signer>
) -> chain::ChannelMonitorUpdateStatus {
- let res = self.persister.update_persisted_channel(funding_txo, update, data, update_id);
+ let res = self.persister.update_persisted_channel(funding_txo, update, data);
if let Some(update) = update {
let commitment_txs = data.counterparty_commitment_txs_from_update(update);
/// returned.
pub update_rets: Mutex<VecDeque<chain::ChannelMonitorUpdateStatus>>,
/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
- /// MonitorUpdateId here.
+ /// channel's funding outpoint here.
pub chain_sync_monitor_persistences: Mutex<VecDeque<OutPoint>>,
/// When we get an update_persisted_channel call *with* a ChannelMonitorUpdate, we insert the
- /// MonitorUpdateId here.
- pub offchain_monitor_updates: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
+ /// [`ChannelMonitor::get_latest_update_id`] here.
+ ///
+ /// [`ChannelMonitor`]: channelmonitor::ChannelMonitor
+ pub offchain_monitor_updates: Mutex<HashMap<OutPoint, HashSet<u64>>>,
}
impl TestPersister {
pub fn new() -> Self {
}
}
impl<Signer: sign::ecdsa::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for TestPersister {
- fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+ fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>) -> chain::ChannelMonitorUpdateStatus {
if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
return update_ret
}
chain::ChannelMonitorUpdateStatus::Completed
}
- fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+ fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>) -> chain::ChannelMonitorUpdateStatus {
let mut ret = chain::ChannelMonitorUpdateStatus::Completed;
if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
ret = update_ret;
}
- if update.is_some() {
- self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update_id);
+ if let Some(update) = update {
+ self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update.update_id);
} else {
self.chain_sync_monitor_persistences.lock().unwrap().push_back(funding_txo);
}