Remove MonitorUpdateId from persist trait
author    Gursharan Singh <3442979+G8XSU@users.noreply.github.com>
          Fri, 12 Apr 2024 06:31:30 +0000 (23:31 -0700)
committer Gursharan Singh <3442979+G8XSU@users.noreply.github.com>
          Sat, 27 Apr 2024 03:28:55 +0000 (20:28 -0700)
MonitorUpdateId was an opaque abstraction for IDs generated by
UpdateOrigin::OffChain and UpdateOrigin::ChainSync monitor updates.
It was mainly needed to match calls to
ChainMonitor::channel_monitor_updated with their originating persist
calls. We no longer track UpdateOrigin::ChainSync monitor updates and
can directly use ChannelMonitor::get_latest_update_id() for tracking
UpdateOrigin::OffChain monitor updates.
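
For `Persist` implementers, the change looks roughly as follows. This is
a minimal sketch, not code from this commit: the `MyStore` type and its
`spawn_write` helper are hypothetical stand-ins for an asynchronous
storage backend; the trait signatures and the id types are taken from
the diff below.

    use lightning::chain::ChannelMonitorUpdateStatus;
    use lightning::chain::chainmonitor::Persist;
    use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
    use lightning::chain::transaction::OutPoint;
    use lightning::sign::ecdsa::WriteableEcdsaChannelSigner;
    use lightning::util::ser::Writeable;

    struct MyStore; // hypothetical async-capable backend

    impl MyStore {
        // Hypothetical helper: queue `bytes` for a background write and
        // remember `update_id` so completion can be reported later.
        fn spawn_write(&self, _funding_txo: OutPoint, _bytes: Vec<u8>, _update_id: u64) {}
    }

    impl<ChannelSigner: WriteableEcdsaChannelSigner> Persist<ChannelSigner> for MyStore {
        fn persist_new_channel(
            &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
        ) -> ChannelMonitorUpdateStatus {
            // No MonitorUpdateId parameter any more: the monitor's own
            // latest update id identifies this call on completion.
            self.spawn_write(funding_txo, monitor.encode(), monitor.get_latest_update_id());
            ChannelMonitorUpdateStatus::InProgress
        }

        fn update_persisted_channel(
            &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
            monitor: &ChannelMonitor<ChannelSigner>,
        ) -> ChannelMonitorUpdateStatus {
            if let Some(update) = update {
                // Off-chain update: completion is keyed by the update's own id.
                self.spawn_write(funding_txo, update.encode(), update.update_id);
            } else {
                // Chain-sync persistence: persist the whole monitor; no matching
                // channel_monitor_updated call is expected for this case.
                self.spawn_write(funding_txo, monitor.encode(), monitor.get_latest_update_id());
            }
            ChannelMonitorUpdateStatus::InProgress
        }

        fn archive_persisted_channel(&self, _funding_txo: OutPoint) {
            // Placeholder body: move the monitor to archival storage
            // rather than deleting it.
        }
    }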

fuzz/src/utils/test_persister.rs
lightning-persister/src/fs_store.rs
lightning/src/chain/chainmonitor.rs
lightning/src/util/persist.rs
lightning/src/util/test_utils.rs

diff --git a/fuzz/src/utils/test_persister.rs b/fuzz/src/utils/test_persister.rs
index f9a03d16178ac224bce66333d14fc8d544124310..a99c397d0b2341c8c4b8b6d76b1f13a345c85540 100644
@@ -1,6 +1,5 @@
 use lightning::chain;
 use lightning::chain::{chainmonitor, channelmonitor};
-use lightning::chain::chainmonitor::MonitorUpdateId;
 use lightning::chain::transaction::OutPoint;
 use lightning::util::test_channel_signer::TestChannelSigner;
 
@@ -10,11 +9,11 @@ pub struct TestPersister {
        pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
 }
 impl chainmonitor::Persist<TestChannelSigner> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 
-       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                self.update_ret.lock().unwrap().clone()
        }
 
diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs
index 014dde3e0374efb2dc5fbcd0827ad60d6d0b373a..8a144f6196b814104665572366f17c2b7ba02d72 100644
@@ -448,8 +448,6 @@ mod tests {
                nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
                check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
                let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-               let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-               let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
 
                // Set the store's directory to read-only, which should result in
                // returning an unrecoverable failure when we then attempt to persist a
@@ -463,7 +461,7 @@ mod tests {
                        txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
                        index: 0
                };
-               match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+               match store.persist_new_channel(test_txo, &added_monitors[0].1) {
                        ChannelMonitorUpdateStatus::UnrecoverableError => {},
                        _ => panic!("unexpected result from persisting new channel")
                }
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index fb2bfc3fdc8c9a32773535ae731ea9ef2c1362cd..06052dc84de52dd3d7257e55ec0aa4bed5ccc9e2 100644
@@ -47,46 +47,6 @@ use core::ops::Deref;
 use core::sync::atomic::{AtomicUsize, Ordering};
 use bitcoin::secp256k1::PublicKey;
 
-mod update_origin {
-       #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
-       /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
-       /// entirely opaque.
-       pub(crate) enum UpdateOrigin {
-               /// An update that was generated by the `ChannelManager` (via our [`crate::chain::Watch`]
-               /// implementation). This corresponds to an actual [ChannelMonitorUpdate::update_id] field
-               /// and [ChannelMonitor::get_latest_update_id].
-               ///
-               /// [ChannelMonitor::get_latest_update_id]: crate::chain::channelmonitor::ChannelMonitor::get_latest_update_id
-               /// [ChannelMonitorUpdate::update_id]: crate::chain::channelmonitor::ChannelMonitorUpdate::update_id
-               OffChain(u64),
-               /// An update that was generated during blockchain processing. The ID here is specific to the
-               /// generating [ChannelMonitor] and does *not* correspond to any on-disk IDs.
-               ///
-               /// [ChannelMonitor]: crate::chain::channelmonitor::ChannelMonitor
-               ChainSync(u64),
-       }
-}
-
-#[cfg(any(feature = "_test_utils", test))]
-pub(crate) use update_origin::UpdateOrigin;
-#[cfg(not(any(feature = "_test_utils", test)))]
-use update_origin::UpdateOrigin;
-
-/// An opaque identifier describing a specific [`Persist`] method call.
-#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
-pub struct MonitorUpdateId {
-       pub(crate) contents: UpdateOrigin,
-}
-
-impl MonitorUpdateId {
-       pub(crate) fn from_monitor_update(update: &ChannelMonitorUpdate) -> Self {
-               Self { contents: UpdateOrigin::OffChain(update.update_id) }
-       }
-       pub(crate) fn from_new_monitor<ChannelSigner: WriteableEcdsaChannelSigner>(monitor: &ChannelMonitor<ChannelSigner>) -> Self {
-               Self { contents: UpdateOrigin::OffChain(monitor.get_latest_update_id()) }
-       }
-}
-
 /// `Persist` defines behavior for persisting channel monitors: this could mean
 /// writing once to disk, and/or uploading to one or more backup services.
 ///
@@ -119,7 +79,7 @@ impl MonitorUpdateId {
 ///  All calls should generally spawn a background task and immediately return
 ///  [`ChannelMonitorUpdateStatus::InProgress`]. Once the update completes,
 ///  [`ChainMonitor::channel_monitor_updated`] should be called with the corresponding
-///  [`MonitorUpdateId`].
+///  [`ChannelMonitor::get_latest_update_id`] or [`ChannelMonitorUpdate::update_id`].
 ///
 ///  Note that unlike the direct [`chain::Watch`] interface,
 ///  [`ChainMonitor::channel_monitor_updated`] must be called once for *each* update which occurs.
@@ -150,15 +110,16 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
        /// channel's outpoint (and it is up to you to maintain a correct mapping between the outpoint
        /// and the stored channel data). Note that you **must** persist every new monitor to disk.
        ///
-       /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
-       /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
+       /// The [`ChannelMonitor::get_latest_update_id`] uniquely links this call to [`ChainMonitor::channel_monitor_updated`].
+       /// For [`Persist::persist_new_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
+       /// when you return [`ChannelMonitorUpdateStatus::InProgress`].
        ///
        /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`
        /// and [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
        ///
        /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
        /// [`Writeable::write`]: crate::util::ser::Writeable::write
-       fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+       fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
 
        /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
        /// update.
@@ -185,15 +146,17 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
        /// them in batches. The size of each monitor grows `O(number of state updates)`
        /// whereas updates are small and `O(1)`.
        ///
-       /// The `update_id` is used to identify this call to [`ChainMonitor::channel_monitor_updated`],
-       /// if you return [`ChannelMonitorUpdateStatus::InProgress`].
+       /// The [`ChannelMonitorUpdate::update_id`] or [`ChannelMonitor::get_latest_update_id`] uniquely
+       /// links this call to [`ChainMonitor::channel_monitor_updated`].
+       /// For [`Persist::update_persisted_channel`], it is only necessary to call [`ChainMonitor::channel_monitor_updated`]
+       /// when a [`ChannelMonitorUpdate`] is provided and when you return [`ChannelMonitorUpdateStatus::InProgress`].
        ///
        /// See [`Writeable::write`] on [`ChannelMonitor`] for writing out a `ChannelMonitor`,
        /// [`Writeable::write`] on [`ChannelMonitorUpdate`] for writing out an update, and
        /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
        ///
        /// [`Writeable::write`]: crate::util::ser::Writeable::write
-       fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+       fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> ChannelMonitorUpdateStatus;
        /// Prevents the channel monitor from being loaded on startup.
        ///
        /// Archiving the data in a backup location (rather than deleting it fully) is useful for
@@ -209,13 +172,12 @@ struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
        /// update_persisted_channel, the user returns a
        /// [`ChannelMonitorUpdateStatus::InProgress`], and then calls channel_monitor_updated
        /// immediately, racing our insertion of the pending update into the contained Vec.
-       pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
+       pending_monitor_updates: Mutex<Vec<u64>>,
 }
 
 impl<ChannelSigner: WriteableEcdsaChannelSigner> MonitorHolder<ChannelSigner> {
-       fn has_pending_offchain_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<MonitorUpdateId>>) -> bool {
-               pending_monitor_updates_lock.iter().any(|update_id|
-                       if let UpdateOrigin::OffChain(_) = update_id.contents { true } else { false })
+       fn has_pending_updates(&self, pending_monitor_updates_lock: &MutexGuard<Vec<u64>>) -> bool {
+               !pending_monitor_updates_lock.is_empty()
        }
 }
 
@@ -259,7 +221,7 @@ pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T:
         P::Target: Persist<ChannelSigner>,
 {
        monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
-       /// When we generate a [`MonitorUpdateId`] for a chain-event monitor persistence, we need a
+       /// When we generate a monitor update for a chain-event monitor persistence, we need a
        /// unique ID, which we calculate by simply getting the next value from this counter. Note that
        /// the ID is never persisted so it's ok that they reset on restart.
        sync_persistence_id: AtomicCounter,
@@ -346,20 +308,11 @@ where C::Target: chain::Filter,
                let mut txn_outputs;
                {
                        txn_outputs = process(monitor, txdata);
-                       let chain_sync_update_id = self.sync_persistence_id.get_increment();
-                       let update_id = MonitorUpdateId {
-                               contents: UpdateOrigin::ChainSync(chain_sync_update_id),
-                       };
-
-                       log_trace!(logger, "Syncing Channel Monitor for channel {} for block-data update_id {}",
-                               log_funding_info!(monitor),
-                               chain_sync_update_id
-                       );
-                       match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
+                       log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+                       match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) {
                                ChannelMonitorUpdateStatus::Completed =>
-                                       log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data update_id {}",
-                                               log_funding_info!(monitor),
-                                               chain_sync_update_id
+                                       log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data",
+                                               log_funding_info!(monitor)
                                        ),
                                ChannelMonitorUpdateStatus::InProgress => {
                                        log_debug!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor));
@@ -464,7 +417,10 @@ where C::Target: chain::Filter,
 
        #[cfg(not(c_bindings))]
        /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
-       pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<MonitorUpdateId>> {
+       /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
+       /// that have not yet been fully persisted. Note that if a full monitor is persisted, all the pending
+       /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
+       pub fn list_pending_monitor_updates(&self) -> HashMap<OutPoint, Vec<u64>> {
                hash_map_from_iter(self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
                        (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
                }))
@@ -472,7 +428,10 @@ where C::Target: chain::Filter,
 
        #[cfg(c_bindings)]
        /// Lists the pending updates for each [`ChannelMonitor`] (by `OutPoint` being monitored).
-       pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<MonitorUpdateId>)> {
+       /// Each `Vec<u64>` contains `update_id`s from [`ChannelMonitor::get_latest_update_id`] for updates
+       /// that have not yet been fully persisted. Note that if a full monitor is persisted, all the pending
+       /// monitor updates must be individually marked completed by calling [`ChainMonitor::channel_monitor_updated`].
+       pub fn list_pending_monitor_updates(&self) -> Vec<(OutPoint, Vec<u64>)> {
                self.monitors.read().unwrap().iter().map(|(outpoint, holder)| {
                        (*outpoint, holder.pending_monitor_updates.lock().unwrap().clone())
                }).collect()
@@ -491,16 +450,20 @@ where C::Target: chain::Filter,
        ///  1) This [`ChainMonitor`] calls [`Persist::update_persisted_channel`] which stores the
        ///     update to disk and begins updating any remote (e.g. watchtower/backup) copies,
        ///     returning [`ChannelMonitorUpdateStatus::InProgress`],
-       ///  2) once all remote copies are updated, you call this function with the
-       ///     `completed_update_id` that completed, and once all pending updates have completed the
-       ///     channel will be re-enabled.
-       //      Note that we re-enable only after `UpdateOrigin::OffChain` updates complete, we don't
-       //      care about `UpdateOrigin::ChainSync` updates for the channel state being updated. We
-       //      only care about `UpdateOrigin::ChainSync` for returning `MonitorEvent`s.
+       ///  2) once all remote copies are updated, you call this function with [`ChannelMonitor::get_latest_update_id`]
+       ///     or [`ChannelMonitorUpdate::update_id`] as the `completed_update_id`, and once all pending
+       ///     updates have completed the channel will be re-enabled.
+       ///
+       /// It is only necessary to call [`ChainMonitor::channel_monitor_updated`] when you return [`ChannelMonitorUpdateStatus::InProgress`]
+       /// from [`Persist`] and either:
+       ///   1. A new [`ChannelMonitor`] was added in [`Persist::persist_new_channel`], or
+       ///   2. A [`ChannelMonitorUpdate`] was provided as part of [`Persist::update_persisted_channel`].
+       /// Note that we don't care about calls to [`Persist::update_persisted_channel`] where no
+       /// [`ChannelMonitorUpdate`] was provided.
        ///
        /// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
        /// registered [`ChannelMonitor`]s.
-       pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
+       pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: u64) -> Result<(), APIError> {
                let monitors = self.monitors.read().unwrap();
                let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
                        return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
@@ -508,39 +471,28 @@ where C::Target: chain::Filter,
                let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap();
                pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
 
-               match completed_update_id {
-                       MonitorUpdateId { contents: UpdateOrigin::OffChain(completed_update_id) } => {
-                               // Note that we only check for `UpdateOrigin::OffChain` failures here - if
-                               // we're being told that a `UpdateOrigin::OffChain` monitor update completed,
-                               // we only care about ensuring we don't tell the `ChannelManager` to restore
-                               // the channel to normal operation until all `UpdateOrigin::OffChain` updates
-                               // complete.
-                               // If there's some `UpdateOrigin::ChainSync` update still pending that's okay
-                               // - we can still update our channel state, just as long as we don't return
-                               // `MonitorEvent`s from the monitor back to the `ChannelManager` until they
-                               // complete.
-                               let monitor_is_pending_updates = monitor_data.has_pending_offchain_updates(&pending_monitor_updates);
-                               log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
-                                       completed_update_id,
-                                       funding_txo,
-                                       if monitor_is_pending_updates {
-                                               "still have pending off-chain updates"
-                                       } else {
-                                               "all off-chain updates complete, returning a MonitorEvent"
-                                       });
-                               if monitor_is_pending_updates {
-                                       // If there are still monitor updates pending, we cannot yet construct a
-                                       // Completed event.
-                                       return Ok(());
-                               }
-                               let channel_id = monitor_data.monitor.channel_id();
-                               self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
-                                       funding_txo, channel_id,
-                                       monitor_update_id: monitor_data.monitor.get_latest_update_id(),
-                               }], monitor_data.monitor.get_counterparty_node_id()));
-                       },
-                       MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => {},
+               // Note that we only check for pending non-chainsync monitor updates and we don't track monitor
+               // updates resulting from chainsync in `pending_monitor_updates`.
+               let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates);
+               log_debug!(self.logger, "Completed off-chain monitor update {} for channel with funding outpoint {:?}, {}",
+                       completed_update_id,
+                       funding_txo,
+                       if monitor_is_pending_updates {
+                               "still have pending off-chain updates"
+                       } else {
+                               "all off-chain updates complete, returning a MonitorEvent"
+                       });
+               if monitor_is_pending_updates {
+                       // If there are still monitor updates pending, we cannot yet construct a
+                       // Completed event.
+                       return Ok(());
                }
+               let channel_id = monitor_data.monitor.channel_id();
+               self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+                       funding_txo, channel_id,
+                       monitor_update_id: monitor_data.monitor.get_latest_update_id(),
+               }], monitor_data.monitor.get_counterparty_node_id()));
+
                self.event_notifier.notify();
                Ok(())
        }
@@ -771,9 +723,9 @@ where C::Target: chain::Filter,
                        hash_map::Entry::Vacant(e) => e,
                };
                log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
-               let update_id = MonitorUpdateId::from_new_monitor(&monitor);
+               let update_id = monitor.get_latest_update_id();
                let mut pending_monitor_updates = Vec::new();
-               let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
+               let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor);
                match persist_res {
                        ChannelMonitorUpdateStatus::InProgress => {
                                log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
@@ -823,7 +775,7 @@ where C::Target: chain::Filter,
                                log_trace!(logger, "Updating ChannelMonitor to id {} for channel {}", update.update_id, log_funding_info!(monitor));
                                let update_res = monitor.update_monitor(update, &self.broadcaster, &self.fee_estimator, &self.logger);
 
-                               let update_id = MonitorUpdateId::from_monitor_update(update);
+                               let update_id = update.update_id;
                                let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
                                let persist_res = if update_res.is_err() {
                                        // Even if updating the monitor returns an error, the monitor's state will
@@ -832,9 +784,9 @@ where C::Target: chain::Filter,
                                        // while reading `channel_monitor` with updates from storage. Instead, we should persist
                                        // the entire `channel_monitor` here.
                                        log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
-                                       self.persister.update_persisted_channel(funding_txo, None, monitor, update_id)
+                                       self.persister.update_persisted_channel(funding_txo, None, monitor)
                                } else {
-                                       self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id)
+                                       self.persister.update_persisted_channel(funding_txo, Some(update), monitor)
                                };
                                match persist_res {
                                        ChannelMonitorUpdateStatus::InProgress => {
diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs
index 249a089cd4883170be76bf8574855379ab48c643..46f82d2c113bce38daae9f56a56e6036ddf13be7 100644
@@ -20,7 +20,7 @@ use crate::prelude::*;
 
 use crate::chain;
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
-use crate::chain::chainmonitor::{Persist, MonitorUpdateId};
+use crate::chain::chainmonitor::Persist;
 use crate::sign::{EntropySource, ecdsa::WriteableEcdsaChannelSigner, SignerProvider};
 use crate::chain::transaction::OutPoint;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, CLOSED_CHANNEL_UPDATE_ID};
@@ -208,7 +208,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<Ch
        // Then we should return InProgress rather than UnrecoverableError, implying we should probably
        // just shut down the node since we're not retrying persistence!
 
-       fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
                match self.write(
                        CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -220,7 +220,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore + ?Sized> Persist<Ch
                }
        }
 
-       fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>) -> chain::ChannelMonitorUpdateStatus {
                let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
                match self.write(
                        CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
@@ -648,8 +648,7 @@ where
        /// Persists a new channel. This means writing the entire monitor to the
        /// parametrized [`KVStore`].
        fn persist_new_channel(
-               &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
-               _monitor_update_call_id: MonitorUpdateId,
+               &self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>
        ) -> chain::ChannelMonitorUpdateStatus {
                // Determine the proper key for this monitor
                let monitor_name = MonitorName::from(funding_txo);
@@ -693,10 +692,8 @@ where
        ///   - The update is at [`CLOSED_CHANNEL_UPDATE_ID`]
        fn update_persisted_channel(
                &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
-               monitor: &ChannelMonitor<ChannelSigner>, monitor_update_call_id: MonitorUpdateId,
+               monitor: &ChannelMonitor<ChannelSigner>
        ) -> chain::ChannelMonitorUpdateStatus {
-               // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with
-               // ChannelMonitorUpdate's update_id.
                if let Some(update) = update {
                        if update.update_id != CLOSED_CHANNEL_UPDATE_ID
                                && update.update_id % self.maximum_pending_updates != 0
@@ -732,7 +729,7 @@ where
                                };
 
                                // We could write this update, but it meets criteria of our design that calls for a full monitor write.
-                               let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
+                               let monitor_update_status = self.persist_new_channel(funding_txo, monitor);
 
                                if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
                                        let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
@@ -761,7 +758,7 @@ where
                        }
                } else {
                        // There is no update given, so we must persist a new monitor.
-                       self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+                       self.persist_new_channel(funding_txo, monitor)
                }
        }
 
@@ -1117,8 +1114,6 @@ mod tests {
                check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
                {
                        let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
-                       let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-                       let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap();
                        let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
                        let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
                        let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
@@ -1130,7 +1125,7 @@ mod tests {
                                entropy_source: node_cfgs[0].keys_manager,
                                signer_provider: node_cfgs[0].keys_manager,
                        };
-                       match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+                       match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1) {
                                ChannelMonitorUpdateStatus::UnrecoverableError => {
                                        // correct result
                                }
@@ -1141,7 +1136,7 @@ mod tests {
                                        panic!("Returned InProgress when shouldn't have")
                                }
                        }
-                       match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
+                       match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1) {
                                ChannelMonitorUpdateStatus::UnrecoverableError => {
                                        // correct result
                                }
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index b9672dc405f39a5cf5092e559403a005fe3550af..642e548e606dbd0f19dd9b899258240b39bdd740 100644
@@ -16,7 +16,6 @@ use crate::chain::chaininterface::ConfirmationTarget;
 #[cfg(test)]
 use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW;
 use crate::chain::chainmonitor;
-use crate::chain::chainmonitor::{MonitorUpdateId};
 use crate::chain::channelmonitor;
 use crate::chain::channelmonitor::MonitorEvent;
 use crate::chain::transaction::OutPoint;
@@ -311,7 +310,7 @@ impl SignerProvider for OnlyReadsKeysInterface {
 pub struct TestChainMonitor<'a> {
        pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor<TestChannelSigner>)>>,
        pub monitor_updates: Mutex<HashMap<ChannelId, Vec<channelmonitor::ChannelMonitorUpdate>>>,
-       pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, MonitorUpdateId)>>,
+       pub latest_monitor_update_id: Mutex<HashMap<ChannelId, (OutPoint, u64, u64)>>,
        pub chain_monitor: chainmonitor::ChainMonitor<TestChannelSigner, &'a TestChainSource, &'a dyn chaininterface::BroadcasterInterface, &'a TestFeeEstimator, &'a TestLogger, &'a dyn chainmonitor::Persist<TestChannelSigner>>,
        pub keys_manager: &'a TestKeysInterface,
        /// If this is set to Some(), the next update_channel call (not watch_channel) must be a
@@ -350,7 +349,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
                        &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
                assert!(new_monitor == monitor);
                self.latest_monitor_update_id.lock().unwrap().insert(monitor.channel_id(),
-                       (funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
+                       (funding_txo, monitor.get_latest_update_id(), monitor.get_latest_update_id()));
                self.added_monitors.lock().unwrap().push((funding_txo, monitor));
                self.chain_monitor.watch_channel(funding_txo, new_monitor)
        }
@@ -374,7 +373,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
                }
 
                self.latest_monitor_update_id.lock().unwrap().insert(channel_id,
-                       (funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(update)));
+                       (funding_txo, update.update_id, update.update_id));
                let update_res = self.chain_monitor.update_channel(funding_txo, update);
                // At every point where we get a monitor update, we should be able to send a useful monitor
                // to a watchtower and disk...
@@ -453,9 +452,9 @@ impl WatchtowerPersister {
 #[cfg(test)]
 impl<Signer: sign::ecdsa::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for WatchtowerPersister {
        fn persist_new_channel(&self, funding_txo: OutPoint,
-               data: &channelmonitor::ChannelMonitor<Signer>, id: MonitorUpdateId
+               data: &channelmonitor::ChannelMonitor<Signer>
        ) -> chain::ChannelMonitorUpdateStatus {
-               let res = self.persister.persist_new_channel(funding_txo, data, id);
+               let res = self.persister.persist_new_channel(funding_txo, data);
 
                assert!(self.unsigned_justice_tx_data.lock().unwrap()
                        .insert(funding_txo, VecDeque::new()).is_none());
@@ -475,9 +474,9 @@ impl<Signer: sign::ecdsa::WriteableEcdsaChannelSigner> chainmonitor::Persist<Sig
 
        fn update_persisted_channel(
                &self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>,
-               data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId
+               data: &channelmonitor::ChannelMonitor<Signer>
        ) -> chain::ChannelMonitorUpdateStatus {
-               let res = self.persister.update_persisted_channel(funding_txo, update, data, update_id);
+               let res = self.persister.update_persisted_channel(funding_txo, update, data);
 
                if let Some(update) = update {
                        let commitment_txs = data.counterparty_commitment_txs_from_update(update);
@@ -515,11 +514,13 @@ pub struct TestPersister {
        /// returned.
        pub update_rets: Mutex<VecDeque<chain::ChannelMonitorUpdateStatus>>,
        /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
-       /// MonitorUpdateId here.
+       /// channel's funding outpoint here.
        pub chain_sync_monitor_persistences: Mutex<VecDeque<OutPoint>>,
        /// When we get an update_persisted_channel call *with* a ChannelMonitorUpdate, we insert the
-       /// MonitorUpdateId here.
-       pub offchain_monitor_updates: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
+       /// [`ChannelMonitorUpdate::update_id`] here.
+       ///
+       /// [`ChannelMonitorUpdate::update_id`]: channelmonitor::ChannelMonitorUpdate::update_id
+       pub offchain_monitor_updates: Mutex<HashMap<OutPoint, HashSet<u64>>>,
 }
 impl TestPersister {
        pub fn new() -> Self {
@@ -536,21 +537,21 @@ impl TestPersister {
        }
 }
 impl<Signer: sign::ecdsa::WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for TestPersister {
-       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>) -> chain::ChannelMonitorUpdateStatus {
                if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
                        return update_ret
                }
                chain::ChannelMonitorUpdateStatus::Completed
        }
 
-       fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+       fn update_persisted_channel(&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>) -> chain::ChannelMonitorUpdateStatus {
                let mut ret = chain::ChannelMonitorUpdateStatus::Completed;
                if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
                        ret = update_ret;
                }
 
-               if update.is_some()  {
-                       self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update_id);
+               if let Some(update) = update {
+                       self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update.update_id);
                } else {
                        self.chain_sync_monitor_persistences.lock().unwrap().push_back(funding_txo);
                }