- chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-
- // Connect B's commitment transaction, but only to the ChainMonitor/ChannelMonitor. The
- // channel is now closed, but the ChannelManager doesn't know that yet.
- let new_header = create_dummy_header(nodes[0].best_block_info().0, 0);
- nodes[0].chain_monitor.chain_monitor.transactions_confirmed(&new_header,
- &[(0, &remote_txn[0]), (1, &remote_txn[1])], nodes[0].best_block_info().1 + 1);
- assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
- nodes[0].chain_monitor.chain_monitor.best_block_updated(&new_header, nodes[0].best_block_info().1 + 1);
- assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
-
- // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass
- // the update through to the ChannelMonitor which will refuse it (as the channel is closed).
- chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
- unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, second_payment_hash,
- RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)
- ), false, APIError::MonitorUpdateInProgress, {});
- check_added_monitors!(nodes[0], 1);
+ chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+ chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+ connect_blocks(&nodes[1], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+ connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+
+ // Connecting [`CHAINSYNC_MONITOR_PARTITION_FACTOR`] * 2 blocks should trigger only 2 writes
+ // per monitor/channel.
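+ // Node 0 has two channels (and thus two monitors), so its persister should see twice as many writes.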
+ assert_eq!(2 * 2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ assert_eq!(2, chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ assert_eq!(2, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+
+ // Test that monitors with pending claims are persisted on every block.
+ // Force-close channel_2 (between node 0 and node 2) to create a pending claim in node 0's monitor.
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_2, &nodes[2].node.get_our_node_id(), "Channel force-closed".to_string()).unwrap();
+ check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false,
+ [nodes[2].node.get_our_node_id()], 1000000);
+ check_closed_broadcast(&nodes[0], 1, true);
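+ // Grab node 0's broadcast commitment transaction so it can be confirmed on node 2.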
+ let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(close_tx.len(), 1);
+
+ mine_transaction(&nodes[2], &close_tx[0]);
+ check_added_monitors(&nodes[2], 1);
+ check_closed_broadcast(&nodes[2], 1, true);
+ check_closed_event!(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false,
+ [nodes[0].node.get_our_node_id()], 1000000);