Merge pull request #2656 from TheBlueMatt/2023-09-scoring-decay-timer
author     Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
           Fri, 15 Dec 2023 20:06:30 +0000 (20:06 +0000)
committer  GitHub <noreply@github.com>
           Fri, 15 Dec 2023 20:06:30 +0000 (20:06 +0000)
Stop decaying liquidity information during scoring

ci/check-cfg-flags.py
ci/ci-tests.sh
lightning-background-processor/Cargo.toml
lightning-block-sync/Cargo.toml
lightning-block-sync/src/convert.rs
lightning-net-tokio/Cargo.toml
lightning-transaction-sync/Cargo.toml
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/mod.rs
lightning/src/util/persist.rs

ci/check-cfg-flags.py
index 85cbde8538408c812f8b0ec4e3dd65b925c958bd..02b598cd447d26a5577883cf17382cd5a2685ea3 100755 (executable)
@@ -86,6 +86,8 @@ def check_cfg_tag(cfg):
         pass
     elif cfg == "taproot":
         pass
+    elif cfg == "async_signing":
+        pass
     elif cfg == "require_route_graph_test":
         pass
     else:
ci/ci-tests.sh
index 11934a8307a6f0723aeeb7a5094f3853a0c0a981..374e3616c149d7a8578f6f21851d3aff352dfb34 100755 (executable)
@@ -171,7 +171,6 @@ if [ -f "$(which arm-none-eabi-gcc)" ]; then
        popd
 fi
 
-echo -e "\n\nTest Taproot builds"
-pushd lightning
+echo -e "\n\nTest cfg-flag builds"
 RUSTFLAGS="$RUSTFLAGS --cfg=taproot" cargo test --verbose --color always -p lightning
-popd
+RUSTFLAGS="$RUSTFLAGS --cfg=async_signing" cargo test --verbose --color always -p lightning
lightning-background-processor/Cargo.toml
index 53358d43c9e8d8335cd2e6ecd1b6b55c83b74557..904c821fdb17929be252b95831bda1eefc7987e0 100644 (file)
@@ -26,7 +26,7 @@ lightning = { version = "0.0.118", path = "../lightning", default-features = fal
 lightning-rapid-gossip-sync = { version = "0.0.118", path = "../lightning-rapid-gossip-sync", default-features = false }
 
 [dev-dependencies]
-tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
+tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] }
 lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
 lightning-invoice = { version = "0.26.0", path = "../lightning-invoice" }
 lightning-persister = { version = "0.0.118", path = "../lightning-persister" }
lightning-block-sync/Cargo.toml
index c7f7191ed55e2704b11e2be5e9e740668c42cc62..89584744047b2ea89835c641b94c67d3c4b0f531 100644 (file)
@@ -21,10 +21,10 @@ rpc-client = [ "serde_json", "chunked_transfer" ]
 bitcoin = "0.30.2"
 hex = { package = "hex-conservative", version = "0.1.1", default-features = false }
 lightning = { version = "0.0.118", path = "../lightning" }
-tokio = { version = "1.0", features = [ "io-util", "net", "time", "rt" ], optional = true }
+tokio = { version = "1.35", features = [ "io-util", "net", "time", "rt" ], optional = true }
 serde_json = { version = "1.0", optional = true }
 chunked_transfer = { version = "1.4", optional = true }
 
 [dev-dependencies]
 lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
-tokio = { version = "1.14", features = [ "macros", "rt" ] }
+tokio = { version = "1.35", features = [ "macros", "rt" ] }
lightning-block-sync/src/convert.rs
index 0f9ab8c43baadcc8cb72eec2375a063e9c4ffb37..62b0d6e47cbbd303e19fde760b57d22339423009 100644 (file)
@@ -162,25 +162,8 @@ impl TryInto<(BlockHash, Option<u32>)> for JsonResponse {
 impl TryInto<Txid> for JsonResponse {
        type Error = std::io::Error;
        fn try_into(self) -> std::io::Result<Txid> {
-               match self.0.as_str() {
-                       None => Err(std::io::Error::new(
-                               std::io::ErrorKind::InvalidData,
-                               "expected JSON string",
-                       )),
-                       Some(hex_data) => match Vec::<u8>::from_hex(hex_data) {
-                               Err(_) => Err(std::io::Error::new(
-                                       std::io::ErrorKind::InvalidData,
-                                       "invalid hex data",
-                               )),
-                               Ok(txid_data) => match encode::deserialize(&txid_data) {
-                                       Err(_) => Err(std::io::Error::new(
-                                               std::io::ErrorKind::InvalidData,
-                                               "invalid txid",
-                                       )),
-                                       Ok(txid) => Ok(txid),
-                               },
-                       },
-               }
+               let hex_data = self.0.as_str().ok_or(Self::Error::new(std::io::ErrorKind::InvalidData, "expected JSON string" ))?;
+               Txid::from_str(hex_data).map_err(|err|Self::Error::new(std::io::ErrorKind::InvalidData, err.to_string() ))
        }
 }
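
For reference, a hedged standalone sketch of the same parsing approach used by the shortened TryInto<Txid> body above. It assumes the bitcoin 0.30 crate (where Txid implements FromStr) and serde_json; the free function name txid_from_json is illustrative only and not part of this change.

    use std::str::FromStr;
    use bitcoin::Txid;

    // Sketch only: turn a JSON value into a Txid, mapping both the
    // "not a JSON string" case and a hex-decode failure into io::Error,
    // as the new TryInto<Txid> impl does.
    fn txid_from_json(value: &serde_json::Value) -> std::io::Result<Txid> {
        let hex_data = value.as_str().ok_or_else(|| std::io::Error::new(
            std::io::ErrorKind::InvalidData, "expected JSON string"))?;
        Txid::from_str(hex_data)
            .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err.to_string()))
    }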
 
@@ -622,7 +605,7 @@ pub(crate) mod tests {
                match TryInto::<Txid>::try_into(response) {
                        Err(e) => {
                                assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid hex data");
+                               assert_eq!(e.get_ref().unwrap().to_string(), "bad hex string length 6 (expected 64)");
                        }
                        Ok(_) => panic!("Expected error"),
                }
@@ -634,7 +617,7 @@ pub(crate) mod tests {
                match TryInto::<Txid>::try_into(response) {
                        Err(e) => {
                                assert_eq!(e.kind(), std::io::ErrorKind::InvalidData);
-                               assert_eq!(e.get_ref().unwrap().to_string(), "invalid txid");
+                               assert_eq!(e.get_ref().unwrap().to_string(), "bad hex string length 4 (expected 64)");
                        }
                        Ok(_) => panic!("Expected error"),
                }
@@ -650,6 +633,20 @@ pub(crate) mod tests {
                }
        }
 
+       #[test]
+       fn into_txid_from_bitcoind_rpc_json_response() {
+               let mut rpc_response = serde_json::json!(
+                       {"error": "", "id": "770", "result": "7934f775149929a8b742487129a7c3a535dfb612f0b726cc67bc10bc2628f906"}
+               );
+               let r: std::io::Result<Txid> = JsonResponse(rpc_response.get_mut("result").unwrap().take())
+                       .try_into();
+               assert_eq!(
+                       r.unwrap().to_string(),
+                       "7934f775149929a8b742487129a7c3a535dfb612f0b726cc67bc10bc2628f906"
+               );
+       }
+
        // TryInto<Transaction> can be used in two ways, first with plain hex response where data is
        // the hex encoded transaction (e.g. as a result of getrawtransaction) or as a JSON object
        // where the hex encoded transaction can be found in the hex field of the object (if present)
lightning-net-tokio/Cargo.toml
index e491dec5d458f413038592ff7db26e5849eb28f6..05c048d5168c5173bd73ae982d692ad390d7565a 100644 (file)
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 bitcoin = "0.30.2"
 lightning = { version = "0.0.118", path = "../lightning" }
-tokio = { version = "1.0", features = [ "rt", "sync", "net", "time" ] }
+tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] }
 
 [dev-dependencies]
-tokio = { version = "1.14", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
+tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] }
 lightning = { version = "0.0.118", path = "../lightning", features = ["_test_utils"] }
lightning-transaction-sync/Cargo.toml
index d712229b0a820b841579f883594dcfed7a5f44ac..65f44ed3a666424bb095e244bf2ba0a8f3acf5ca 100644 (file)
@@ -31,7 +31,7 @@ electrum-client = { version = "0.18.0", optional = true }
 
 [dev-dependencies]
 lightning = { version = "0.0.118", path = "../lightning", default-features = false, features = ["std", "_test_utils"] }
-tokio = { version = "1.14.0", features = ["full"] }
+tokio = { version = "1.35.0", features = ["full"] }
 
 [target.'cfg(not(no_download))'.dev-dependencies]
 electrsd = { version = "0.26.0", default-features = false, features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] }
lightning/src/ln/channel.rs
index 375beb6d66cc0ce20fd30e0325a5ced8a8798bcf..050585ef2673f81a6f9caf8484de05d2fa2bf258 100644 (file)
@@ -2434,8 +2434,13 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
                                        .ok();
 
                                if funding_signed.is_none() {
-                                       log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
-                                       self.signer_pending_funding = true;
+                                       #[cfg(not(async_signing))] {
+                                               panic!("Failed to get signature for funding_signed");
+                                       }
+                                       #[cfg(async_signing)] {
+                                               log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
+                                               self.signer_pending_funding = true;
+                                       }
                                } else if self.signer_pending_funding {
                                        log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
                                        self.signer_pending_funding = false;
@@ -4259,7 +4264,7 @@ impl<SP: Deref> Channel<SP> where
 
        /// Indicates that the signer may have some signatures for us, so we should retry if we're
        /// blocked.
-       #[allow(unused)]
+       #[cfg(async_signing)]
        pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
                let commitment_update = if self.context.signer_pending_commitment_update {
                        self.get_last_commitment_update_for_send(logger).ok()
@@ -4363,11 +4368,16 @@ impl<SP: Deref> Channel<SP> where
                        }
                        update
                } else {
-                       if !self.context.signer_pending_commitment_update {
-                               log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
-                               self.context.signer_pending_commitment_update = true;
+                       #[cfg(not(async_signing))] {
+                               panic!("Failed to get signature for new commitment state");
+                       }
+                       #[cfg(async_signing)] {
+                               if !self.context.signer_pending_commitment_update {
+                                       log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
+                                       self.context.signer_pending_commitment_update = true;
+                               }
+                               return Err(());
                        }
-                       return Err(());
                };
                Ok(msgs::CommitmentUpdate {
                        update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
@@ -6448,9 +6458,14 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
                let funding_created = self.get_funding_created_msg(logger);
                if funding_created.is_none() {
-                       if !self.context.signer_pending_funding {
-                               log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
-                               self.context.signer_pending_funding = true;
+                       #[cfg(not(async_signing))] {
+                               panic!("Failed to get signature for new funding creation");
+                       }
+                       #[cfg(async_signing)] {
+                               if !self.context.signer_pending_funding {
+                                       log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
+                                       self.context.signer_pending_funding = true;
+                               }
                        }
                }
 
@@ -6796,7 +6811,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
 
        /// Indicates that the signer may have some signatures for us, so we should retry if we're
        /// blocked.
-       #[allow(unused)]
+       #[cfg(async_signing)]
        pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
                if self.context.signer_pending_funding && self.context.is_outbound() {
                        log_trace!(logger, "Signer unblocked a funding_created");
lightning/src/ln/channelmanager.rs
index f08096426ff7c9bef04629064e02b27082c0c738..9536a9366e18506fa586d96c325b7d725e677de2 100644 (file)
@@ -7322,8 +7322,7 @@ where
        /// attempted in every channel, or in the specifically provided channel.
        ///
        /// [`ChannelSigner`]: crate::sign::ChannelSigner
-       #[cfg(test)] // This is only implemented for one signer method, and should be private until we
-                    // actually finish implementing it fully.
+       #[cfg(async_signing)]
        pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
                let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
lightning/src/ln/mod.rs
index 827d77419892ac7c9c0ba8a1a90bd09d1ee6ccd5..43ec34eaf610fae5253516f7febde751ba6016b0 100644 (file)
@@ -76,7 +76,7 @@ mod monitor_tests;
 #[cfg(test)]
 #[allow(unused_mut)]
 mod shutdown_tests;
-#[cfg(test)]
+#[cfg(all(test, async_signing))]
 #[allow(unused_mut)]
 mod async_signer_tests;
 
lightning/src/util/persist.rs
index a9f534ee4d3413df6489ea1988c13e75b13272b7..e63290620516ee0b6f62b7bb57c0c6d2b9369d49 100644 (file)
@@ -346,9 +346,10 @@ where
 ///
 /// # Pruning stale channel updates
 ///
-/// Stale updates are pruned when a full monitor is written. The old monitor is first read, and if
-/// that succeeds, updates in the range between the old and new monitors are deleted. The `lazy`
-/// flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
+/// Stale updates are pruned when the consolidation threshold set by `maximum_pending_updates` is reached.
+/// Monitor updates in the range between the latest `update_id` and `update_id - maximum_pending_updates`
+/// are deleted.
+/// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
 /// will complete. However, stale updates are not a problem for data integrity, since updates are
 /// only read that are higher than the stored [`ChannelMonitor`]'s `update_id`.
 ///
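
As a rough illustration of the pruning rule described above, here is a hedged sketch of how the cleanup range is chosen after a successful full monitor write; it mirrors the logic added further down in this diff. CLOSED_CHANNEL_UPDATE_ID and maximum_pending_updates are the names used in the code, while the helper itself is hypothetical.

    // Sketch only: pick the inclusive range of update_ids to delete.
    // Regular consolidation deletes `latest - maximum_pending_updates ..= latest`;
    // on channel close the range starts at the previously persisted monitor's
    // update_id and is capped below CLOSED_CHANNEL_UPDATE_ID, since updates are
    // never written at that id. Returns None if the old monitor could not be read.
    fn cleanup_range(latest_update_id: u64, old_monitor_update_id: Option<u64>,
        maximum_pending_updates: u64, closed_channel_update_id: u64) -> Option<(u64, u64)>
    {
        if latest_update_id == closed_channel_update_id {
            old_monitor_update_id.map(|start| {
                let end = core::cmp::min(
                    start.saturating_add(maximum_pending_updates),
                    closed_channel_update_id - 1,
                );
                (start, end)
            })
        } else {
            let end = latest_update_id;
            let start = end.saturating_sub(maximum_pending_updates);
            Some((start, end))
        }
    }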
@@ -610,24 +611,6 @@ where
        ) -> chain::ChannelMonitorUpdateStatus {
                // Determine the proper key for this monitor
                let monitor_name = MonitorName::from(funding_txo);
-               let maybe_old_monitor = self.read_monitor(&monitor_name);
-               match maybe_old_monitor {
-                       Ok((_, ref old_monitor)) => {
-                               // Check that this key isn't already storing a monitor with a higher update_id
-                               // (collision)
-                               if old_monitor.get_latest_update_id() > monitor.get_latest_update_id() {
-                                       log_error!(
-                                               self.logger,
-                                               "Tried to write a monitor at the same outpoint {} with a higher update_id!",
-                                               monitor_name.as_str()
-                                       );
-                                       return chain::ChannelMonitorUpdateStatus::UnrecoverableError;
-                               }
-                       }
-                       // This means the channel monitor is new.
-                       Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
-                       _ => return chain::ChannelMonitorUpdateStatus::UnrecoverableError,
-               }
                // Serialize and write the new monitor
                let mut monitor_bytes = Vec::with_capacity(
                        MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
@@ -641,65 +624,12 @@ where
                        &monitor_bytes,
                ) {
                        Ok(_) => {
-                               // Assess cleanup. Typically, we'll clean up only between the last two known full
-                               // monitors.
-                               if let Ok((_, old_monitor)) = maybe_old_monitor {
-                                       let start = old_monitor.get_latest_update_id();
-                                       let end = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
-                                               // We don't want to clean the rest of u64, so just do possible pending
-                                               // updates. Note that we never write updates at
-                                               // `CLOSED_CHANNEL_UPDATE_ID`.
-                                               cmp::min(
-                                                       start.saturating_add(self.maximum_pending_updates),
-                                                       CLOSED_CHANNEL_UPDATE_ID - 1,
-                                               )
-                                       } else {
-                                               monitor.get_latest_update_id().saturating_sub(1)
-                                       };
-                                       // We should bother cleaning up only if there's at least one update
-                                       // expected.
-                                       for update_id in start..=end {
-                                               let update_name = UpdateName::from(update_id);
-                                               #[cfg(debug_assertions)]
-                                               {
-                                                       if let Ok(update) =
-                                                               self.read_monitor_update(&monitor_name, &update_name)
-                                                       {
-                                                               // Assert that we are reading what we think we are.
-                                                               debug_assert_eq!(update.update_id, update_name.0);
-                                                       } else if update_id != start && monitor.get_latest_update_id() != CLOSED_CHANNEL_UPDATE_ID
-                                                       {
-                                                               // We're deleting something we should know doesn't exist.
-                                                               panic!(
-                                                                       "failed to read monitor update {}",
-                                                                       update_name.as_str()
-                                                               );
-                                                       }
-                                                       // On closed channels, we will unavoidably try to read
-                                                       // non-existent updates since we have to guess at the range of
-                                                       // stale updates, so do nothing.
-                                               }
-                                               if let Err(e) = self.kv_store.remove(
-                                                       CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
-                                                       monitor_name.as_str(),
-                                                       update_name.as_str(),
-                                                       true,
-                                               ) {
-                                                       log_error!(
-                                                               self.logger,
-                                                               "error cleaning up channel monitor updates for monitor {}, reason: {}",
-                                                               monitor_name.as_str(),
-                                                               e
-                                                       );
-                                               };
-                                       }
-                               };
                                chain::ChannelMonitorUpdateStatus::Completed
                        }
                        Err(e) => {
                                log_error!(
                                        self.logger,
-                                       "error writing channel monitor {}/{}/{} reason: {}",
+                                       "Failed to write ChannelMonitor {}/{}/{} reason: {}",
                                        CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
                                        CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
                                        monitor_name.as_str(),
@@ -741,7 +671,7 @@ where
                                        Err(e) => {
                                                log_error!(
                                                        self.logger,
-                                                       "error writing channel monitor update {}/{}/{} reason: {}",
+                                                       "Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
                                                        CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                        monitor_name.as_str(),
                                                        update_name.as_str(),
@@ -751,8 +681,41 @@ where
                                        }
                                }
                        } else {
-                               // We could write this update, but it meets criteria of our design that call for a full monitor write.
-                               self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+                               let monitor_name = MonitorName::from(funding_txo);
+                               // For a channel-close monitor update, we need to read the old monitor before persisting
+                               // the new one in order to determine the cleanup range.
+                               let maybe_old_monitor = match monitor.get_latest_update_id() {
+                                       CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
+                                       _ => None
+                               };
+
+                               // We could write this update, but it meets criteria of our design that calls for a full monitor write.
+                               let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id);
+
+                               if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
+                                       let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
+                                               // If there is an error while reading old monitor, we skip clean up.
+                                               maybe_old_monitor.map(|(_, ref old_monitor)| {
+                                                       let start = old_monitor.get_latest_update_id();
+                                                       // We never persist an update with update_id = CLOSED_CHANNEL_UPDATE_ID
+                                                       let end = cmp::min(
+                                                               start.saturating_add(self.maximum_pending_updates),
+                                                               CLOSED_CHANNEL_UPDATE_ID - 1,
+                                                       );
+                                                       (start, end)
+                                               })
+                                       } else {
+                                               let end = monitor.get_latest_update_id();
+                                               let start = end.saturating_sub(self.maximum_pending_updates);
+                                               Some((start, end))
+                                       };
+
+                                       if let Some((start, end)) = cleanup_range {
+                                               self.cleanup_in_range(monitor_name, start, end);
+                                       }
+                               }
+
+                               monitor_update_status
                        }
                } else {
                        // There is no update given, so we must persist a new monitor.
@@ -761,6 +724,34 @@ where
        }
 }
 
+impl<K: Deref, L: Deref, ES: Deref, SP: Deref> MonitorUpdatingPersister<K, L, ES, SP>
+where
+       ES::Target: EntropySource + Sized,
+       K::Target: KVStore,
+       L::Target: Logger,
+       SP::Target: SignerProvider + Sized
+{
+       // Cleans up monitor updates for given monitor in range `start..=end`.
+       fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
+               for update_id in start..=end {
+                       let update_name = UpdateName::from(update_id);
+                       if let Err(e) = self.kv_store.remove(
+                               CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
+                               monitor_name.as_str(),
+                               update_name.as_str(),
+                               true,
+                       ) {
+                               log_error!(
+                                       self.logger,
+                                       "Failed to clean up channel monitor updates for monitor {}, reason: {}",
+                                       monitor_name.as_str(),
+                                       e
+                               );
+                       };
+               }
+       }
+}
+
 /// A struct representing a name for a monitor.
 #[derive(Debug)]
 struct MonitorName(String);
@@ -896,20 +887,21 @@ mod tests {
        #[test]
        fn persister_with_real_monitors() {
                // This value is used later to limit how many iterations we perform.
-               let test_max_pending_updates = 7;
+               let persister_0_max_pending_updates = 7;
+               // Intentionally set this to a smaller value to test a different alignment.
+               let persister_1_max_pending_updates = 3;
                let chanmon_cfgs = create_chanmon_cfgs(4);
                let persister_0 = MonitorUpdatingPersister {
                        kv_store: &TestStore::new(false),
                        logger: &TestLogger::new(),
-                       maximum_pending_updates: test_max_pending_updates,
+                       maximum_pending_updates: persister_0_max_pending_updates,
                        entropy_source: &chanmon_cfgs[0].keys_manager,
                        signer_provider: &chanmon_cfgs[0].keys_manager,
                };
                let persister_1 = MonitorUpdatingPersister {
                        kv_store: &TestStore::new(false),
                        logger: &TestLogger::new(),
-                       // Intentionally set this to a smaller value to test a different alignment.
-                       maximum_pending_updates: 3,
+                       maximum_pending_updates: persister_1_max_pending_updates,
                        entropy_source: &chanmon_cfgs[1].keys_manager,
                        signer_provider: &chanmon_cfgs[1].keys_manager,
                };
@@ -934,7 +926,6 @@ mod tests {
                node_cfgs[1].chain_monitor = chain_mon_1;
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
                let broadcaster_0 = &chanmon_cfgs[2].tx_broadcaster;
                let broadcaster_1 = &chanmon_cfgs[3].tx_broadcaster;
 
@@ -957,10 +948,11 @@ mod tests {
                                for (_, mon) in persisted_chan_data_0.iter() {
                                        // check that when we read it, we got the right update id
                                        assert_eq!(mon.get_latest_update_id(), $expected_update_id);
-                                       // if the CM is at the correct update id without updates, ensure no updates are stored
+
+                                       // if the CM is at consolidation threshold, ensure no updates are stored.
                                        let monitor_name = MonitorName::from(mon.get_funding_txo().0);
-                                       let (_, cm_0) = persister_0.read_monitor(&monitor_name).unwrap();
-                                       if cm_0.get_latest_update_id() == $expected_update_id {
+                                       if mon.get_latest_update_id() % persister_0_max_pending_updates == 0
+                                                       || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
                                                assert_eq!(
                                                        persister_0.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                                monitor_name.as_str()).unwrap().len(),
@@ -975,8 +967,9 @@ mod tests {
                                for (_, mon) in persisted_chan_data_1.iter() {
                                        assert_eq!(mon.get_latest_update_id(), $expected_update_id);
                                        let monitor_name = MonitorName::from(mon.get_funding_txo().0);
-                                       let (_, cm_1) = persister_1.read_monitor(&monitor_name).unwrap();
-                                       if cm_1.get_latest_update_id() == $expected_update_id {
+                                       // if the CM is at consolidation threshold, ensure no updates are stored.
+                                       if mon.get_latest_update_id() % persister_1_max_pending_updates == 0
+                                                       || mon.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID {
                                                assert_eq!(
                                                        persister_1.kv_store.list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
                                                                monitor_name.as_str()).unwrap().len(),
@@ -1001,7 +994,7 @@ mod tests {
                // Send a few more payments to try all the alignments of max pending updates with
                // updates for a payment sent and received.
                let mut sender = 0;
-               for i in 3..=test_max_pending_updates * 2 {
+               for i in 3..=persister_0_max_pending_updates * 2 {
                        let receiver;
                        if sender == 0 {
                                sender = 1;