use lightning::util::persist::{KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN, read_channel_monitors};
use lightning::ln::functional_test_utils::{connect_block, create_announced_chan_between_nodes,
	create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs,
	send_payment};
use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
use lightning::util::test_utils;
use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
use lightning::events::ClosureReason;

use std::panic::RefUnwindSafe;

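// Exercise the basic `KVStore` operations (write/read/list/remove) of the given implementation,
// including its handling of empty and maximum-length namespaces and keys.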
pub(crate) fn do_read_write_remove_list_persist<K: KVStore + RefUnwindSafe>(kv_store: &K) {
	let data = [42u8; 32];

	let primary_namespace = "testspace";
	let secondary_namespace = "testsubspace";
	let key = "testkey";

	// Test the basic KVStore operations.
	kv_store.write(primary_namespace, secondary_namespace, key, &data).unwrap();

	// Test that an empty primary_namespace/secondary_namespace is allowed, but not an empty
	// primary_namespace with a non-empty secondary_namespace, and not an empty key.
	kv_store.write("", "", key, &data).unwrap();
	let res = std::panic::catch_unwind(|| kv_store.write("", secondary_namespace, key, &data));
	assert!(res.is_err());
	let res = std::panic::catch_unwind(|| kv_store.write(primary_namespace, secondary_namespace, "", &data));
	assert!(res.is_err());

	let listed_keys = kv_store.list(primary_namespace, secondary_namespace).unwrap();
	assert_eq!(listed_keys.len(), 1);
	assert_eq!(listed_keys[0], key);

	let read_data = kv_store.read(primary_namespace, secondary_namespace, key).unwrap();
	assert_eq!(data, &*read_data);

	kv_store.remove(primary_namespace, secondary_namespace, key, false).unwrap();

	let listed_keys = kv_store.list(primary_namespace, secondary_namespace).unwrap();
	assert_eq!(listed_keys.len(), 0);

	// Ensure we have no issue operating with a primary_namespace/secondary_namespace/key of length
	// KVSTORE_NAMESPACE_KEY_MAX_LEN.
	let max_chars: String = std::iter::repeat('A').take(KVSTORE_NAMESPACE_KEY_MAX_LEN).collect();
	kv_store.write(&max_chars, &max_chars, &max_chars, &data).unwrap();

	let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap();
	assert_eq!(listed_keys.len(), 1);
	assert_eq!(listed_keys[0], max_chars);

	let read_data = kv_store.read(&max_chars, &max_chars, &max_chars).unwrap();
	assert_eq!(data, &*read_data);

	kv_store.remove(&max_chars, &max_chars, &max_chars, false).unwrap();

	let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap();
	assert_eq!(listed_keys.len(), 0);
}

// Integration-test the given KVStore implementation. Test relaying a few payments and check that
// the persisted data is updated the appropriate number of times.
pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, store_0, node_cfgs[0].keys_manager);
	let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, store_1, node_cfgs[1].keys_manager);
	node_cfgs[0].chain_monitor = chain_mon_0;
	node_cfgs[1].chain_monitor = chain_mon_1;
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Check that the persisted channel data is empty before any channels are opened.
	let mut persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
	assert_eq!(persisted_chan_data_0.len(), 0);
	let mut persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
	assert_eq!(persisted_chan_data_1.len(), 0);

	// Helper macro to make sure the channel is on the expected update ID.
	macro_rules! check_persisted_data {
		($expected_update_id: expr) => {
			persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
			assert_eq!(persisted_chan_data_0.len(), 1);
			for (_, mon) in persisted_chan_data_0.iter() {
				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
			}

			persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
			assert_eq!(persisted_chan_data_1.len(), 1);
			for (_, mon) in persisted_chan_data_1.iter() {
				assert_eq!(mon.get_latest_update_id(), $expected_update_id);
			}
		}
	}

	// Create an initial channel and check that a channel was persisted.
	let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
	check_persisted_data!(0);

	// Send a few payments and make sure the monitors are updated to the latest.
	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
	check_persisted_data!(5);
	send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
	check_persisted_data!(10);

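	// Note: in this test, each payment sent and claimed via `send_payment` advances the
	// per-channel monitor update ID by five on both nodes, hence the expected IDs of 5 and 10
	// asserted above.
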
	// Force close because cooperative close doesn't result in any persisted updates.
	nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_broadcast!(nodes[0], true);
	check_added_monitors!(nodes[0], 1);

	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
	assert_eq!(node_txn.len(), 1);

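	// Confirm node 0's broadcast commitment transaction on node 1's chain so node 1 also
	// registers the channel as closed.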
	connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
	check_closed_broadcast!(nodes[1], true);
	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
	check_added_monitors!(nodes[1], 1);

	// Make sure everything is persisted as expected after close.
	check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
}