Rename `PaymentReceived` to `PaymentClaimable`
[rust-lightning] / lightning / src / ln / reload_tests.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! Functional tests which test for correct behavior across node restarts.
11
12 use crate::chain::{ChannelMonitorUpdateStatus, Watch};
13 use crate::chain::channelmonitor::ChannelMonitor;
14 use crate::chain::transaction::OutPoint;
15 use crate::ln::channelmanager::{self, ChannelManager, ChannelManagerReadArgs, PaymentId};
16 use crate::ln::msgs;
17 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
18 use crate::util::enforcing_trait_impls::EnforcingSigner;
19 use crate::util::test_utils;
20 use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
21 use crate::util::ser::{Writeable, ReadableArgs};
22 use crate::util::config::UserConfig;
23
24 use bitcoin::hash_types::BlockHash;
25
26 use crate::prelude::*;
27 use core::default::Default;
28 use crate::sync::Mutex;
29
30 use crate::ln::functional_test_utils::*;
31
32 #[test]
33 fn test_funding_peer_disconnect() {
34         // Test that we can lock in our funding tx while disconnected
35         let chanmon_cfgs = create_chanmon_cfgs(2);
36         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
37         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
38         let persister: test_utils::TestPersister;
39         let new_chain_monitor: test_utils::TestChainMonitor;
40         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
41         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
42         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
43
44         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
45         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
46
47         confirm_transaction(&nodes[0], &tx);
48         let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
49         assert!(events_1.is_empty());
50
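	// reconnect_nodes re-establishes the peer connection and asserts on the retransmissions seen;
	// roughly, the tuples describe which side re-sends channel_ready and how many HTLC/RAA
	// retransmissions the harness expects (see functional_test_utils for the exact semantics).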
51         reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
52
53         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
54         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
55
56         confirm_transaction(&nodes[1], &tx);
57         let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
58         assert!(events_2.is_empty());
59
60         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
61         let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
62         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
63         let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
64
65         // nodes[0] hasn't yet received a channel_ready, so it only sends that on reconnect.
66         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish);
67         let events_3 = nodes[0].node.get_and_clear_pending_msg_events();
68         assert_eq!(events_3.len(), 1);
69         let as_channel_ready = match events_3[0] {
70                 MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
71                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
72                         msg.clone()
73                 },
74                 _ => panic!("Unexpected event {:?}", events_3[0]),
75         };
76
77         // nodes[1] received nodes[0]'s channel_ready on the first reconnect above, so it should send
78         // announcement_signatures as well as channel_update.
79         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish);
80         let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
81         assert_eq!(events_4.len(), 3);
82         let chan_id;
83         let bs_channel_ready = match events_4[0] {
84                 MessageSendEvent::SendChannelReady { ref node_id, ref msg } => {
85                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
86                         chan_id = msg.channel_id;
87                         msg.clone()
88                 },
89                 _ => panic!("Unexpected event {:?}", events_4[0]),
90         };
91         let bs_announcement_sigs = match events_4[1] {
92                 MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
93                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
94                         msg.clone()
95                 },
96                 _ => panic!("Unexpected event {:?}", events_4[1]),
97         };
98         match events_4[2] {
99                 MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => {
100                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
101                 },
102                 _ => panic!("Unexpected event {:?}", events_4[2]),
103         }
104
105         // Re-deliver nodes[0]'s channel_ready, which nodes[1] can safely ignore. It currently
106         // generates a duplicative private channel_update
107         nodes[1].node.handle_channel_ready(&nodes[0].node.get_our_node_id(), &as_channel_ready);
108         let events_5 = nodes[1].node.get_and_clear_pending_msg_events();
109         assert_eq!(events_5.len(), 1);
110         match events_5[0] {
111                 MessageSendEvent::SendChannelUpdate { ref node_id, msg: _ } => {
112                         assert_eq!(*node_id, nodes[0].node.get_our_node_id());
113                 },
114                 _ => panic!("Unexpected event {:?}", events_5[0]),
115         };
116
117         // When we deliver nodes[1]'s channel_ready, however, nodes[0] will generate its
118         // announcement_signatures.
119         nodes[0].node.handle_channel_ready(&nodes[1].node.get_our_node_id(), &bs_channel_ready);
120         let events_6 = nodes[0].node.get_and_clear_pending_msg_events();
121         assert_eq!(events_6.len(), 1);
122         let as_announcement_sigs = match events_6[0] {
123                 MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
124                         assert_eq!(*node_id, nodes[1].node.get_our_node_id());
125                         msg.clone()
126                 },
127                 _ => panic!("Unexpected event {:?}", events_6[0]),
128         };
129         expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id());
130         expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id());
131
132         // When we deliver nodes[1]'s announcement_signatures to nodes[0], nodes[0] should immediately
133         // broadcast the channel announcement globally, as well as re-send its (now-public)
134         // channel_update.
135         nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
136         let events_7 = nodes[0].node.get_and_clear_pending_msg_events();
137         assert_eq!(events_7.len(), 1);
138         let (chan_announcement, as_update) = match events_7[0] {
139                 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
140                         (msg.clone(), update_msg.clone())
141                 },
142                 _ => panic!("Unexpected event {:?}", events_7[0]),
143         };
144
145         // Finally, deliver nodes[0]'s announcement_signatures to nodes[1] and make sure it creates the
146         // same channel_announcement.
147         nodes[1].node.handle_announcement_signatures(&nodes[0].node.get_our_node_id(), &as_announcement_sigs);
148         let events_8 = nodes[1].node.get_and_clear_pending_msg_events();
149         assert_eq!(events_8.len(), 1);
150         let bs_update = match events_8[0] {
151                 MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
152                         assert_eq!(*msg, chan_announcement);
153                         update_msg.clone()
154                 },
155                 _ => panic!("Unexpected event {:?}", events_8[0]),
156         };
157
158         // Provide the channel announcement and public updates to the network graph
159         nodes[0].gossip_sync.handle_channel_announcement(&chan_announcement).unwrap();
160         nodes[0].gossip_sync.handle_channel_update(&bs_update).unwrap();
161         nodes[0].gossip_sync.handle_channel_update(&as_update).unwrap();
162
163         let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000);
164         let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
165         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
166
167         // Check that after deserialization and reconnection we can still generate an identical
168         // channel_announcement from the cached signatures.
169         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
170
171         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
172
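	// reload_node! rebuilds nodes[0]'s ChannelManager (along with a fresh persister and
	// ChainMonitor) from the serialized manager and monitor bytes, simulating a full node restart.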
173         reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
174
175         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
176 }
177
178 #[test]
179 fn test_no_txn_manager_serialize_deserialize() {
180         let chanmon_cfgs = create_chanmon_cfgs(2);
181         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
182         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
183         let persister: test_utils::TestPersister;
184         let new_chain_monitor: test_utils::TestChainMonitor;
185         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
186         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
187
188         let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
189
190         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
191
192         let chan_0_monitor_serialized =
193                 get_monitor!(nodes[0], OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).encode();
194         reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
195
196         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
197         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
198         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
199         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
200
201         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
202         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
203         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
204         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
205
206         let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
207         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
208         for node in nodes.iter() {
209                 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
210                 node.gossip_sync.handle_channel_update(&as_update).unwrap();
211                 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
212         }
213
214         send_payment(&nodes[0], &[&nodes[1]], 1000000);
215 }
216
217 #[test]
218 fn test_manager_serialize_deserialize_events() {
219         // This test makes sure the events field in ChannelManager survives de/serialization
220         let chanmon_cfgs = create_chanmon_cfgs(2);
221         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
222         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
223         let persister: test_utils::TestPersister;
224         let new_chain_monitor: test_utils::TestChainMonitor;
225         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
226         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
227
228         // Start creating a channel, but stop right before broadcasting the funding transaction
229         let channel_value = 100000;
230         let push_msat = 10001;
231         let a_flags = channelmanager::provided_init_features();
232         let b_flags = channelmanager::provided_init_features();
233         let node_a = nodes.remove(0);
234         let node_b = nodes.remove(0);
235         node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap();
236         node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
237         node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));
238
239         let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, &node_b.node.get_our_node_id(), channel_value, 42);
240
241         node_a.node.funding_transaction_generated(&temporary_channel_id, &node_b.node.get_our_node_id(), tx.clone()).unwrap();
242         check_added_monitors!(node_a, 0);
243
244         node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
245         {
246                 let mut added_monitors = node_b.chain_monitor.added_monitors.lock().unwrap();
247                 assert_eq!(added_monitors.len(), 1);
248                 assert_eq!(added_monitors[0].0, funding_output);
249                 added_monitors.clear();
250         }
251
252         let bs_funding_signed = get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id());
253         node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &bs_funding_signed);
254         {
255                 let mut added_monitors = node_a.chain_monitor.added_monitors.lock().unwrap();
256                 assert_eq!(added_monitors.len(), 1);
257                 assert_eq!(added_monitors[0].0, funding_output);
258                 added_monitors.clear();
259         }
260         // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
261
262         nodes.push(node_a);
263         nodes.push(node_b);
264
265         // Start the de/serialization process mid-channel creation to check that the channel manager will hold onto events that are serialized
266         let chan_0_monitor_serialized = get_monitor!(nodes[0], bs_funding_signed.channel_id).encode();
267         reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
268
269         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
270
271         // After deserializing, make sure the funding_transaction is still held by the channel manager
272         let events_4 = nodes[0].node.get_and_clear_pending_events();
273         assert_eq!(events_4.len(), 0);
274         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
275         assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
276
277         // Make sure the channel is functioning as though the de/serialization never happened
278         assert_eq!(nodes[0].node.list_channels().len(), 1);
279
280         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
281         let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
282         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
283         let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
284
285         nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
286         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
287         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
288         assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
289
290         let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
291         let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
292         for node in nodes.iter() {
293                 assert!(node.gossip_sync.handle_channel_announcement(&announcement).unwrap());
294                 node.gossip_sync.handle_channel_update(&as_update).unwrap();
295                 node.gossip_sync.handle_channel_update(&bs_update).unwrap();
296         }
297
298         send_payment(&nodes[0], &[&nodes[1]], 1000000);
299 }
300
301 #[test]
302 fn test_simple_manager_serialize_deserialize() {
303         let chanmon_cfgs = create_chanmon_cfgs(2);
304         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
305         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
306         let persister: test_utils::TestPersister;
307         let new_chain_monitor: test_utils::TestChainMonitor;
308         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
309         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
310         let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
311
312         let (our_payment_preimage, _, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
313         let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
314
315         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
316
317         let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
318         reload_node!(nodes[0], nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
319
320         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
321
322         fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
323         claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
324 }
325
326 #[test]
327 fn test_manager_serialize_deserialize_inconsistent_monitor() {
328         // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
329         let chanmon_cfgs = create_chanmon_cfgs(4);
330         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
331         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
332         let logger: test_utils::TestLogger;
333         let fee_estimator: test_utils::TestFeeEstimator;
334         let persister: test_utils::TestPersister;
335         let new_chain_monitor: test_utils::TestChainMonitor;
336         let nodes_0_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
337         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
338         let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
339         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 2, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
340         let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features());
341
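	// Snapshot the monitors now, before the payments below, so we have stale copies to hand to
	// ChannelManager::read later in the test.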
342         let mut node_0_stale_monitors_serialized = Vec::new();
343         for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] {
344                 let mut writer = test_utils::TestVecWriter(Vec::new());
345                 get_monitor!(nodes[0], chan_id_iter).write(&mut writer).unwrap();
346                 node_0_stale_monitors_serialized.push(writer.0);
347         }
348
349         let (our_payment_preimage, _, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
350
351         // Serialize the ChannelManager here, but keep the ChannelMonitor up-to-date
352         let nodes_0_serialized = nodes[0].node.encode();
353
354         route_payment(&nodes[0], &[&nodes[3]], 1000000);
355         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
356         nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
357         nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
358
359         // Now serialize the ChannelMonitors (which are now out-of-sync with the ChannelManager for the
360         // channel with nodes[3])
361         let mut node_0_monitors_serialized = Vec::new();
362         for chan_id_iter in &[chan_id_1, chan_id_2, channel_id] {
363                 node_0_monitors_serialized.push(get_monitor!(nodes[0], chan_id_iter).encode());
364         }
365
366         logger = test_utils::TestLogger::new();
367         fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
368         persister = test_utils::TestPersister::new();
369         let keys_manager = &chanmon_cfgs[0].keys_manager;
370         new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
371         nodes[0].chain_monitor = &new_chain_monitor;
372
373
374         let mut node_0_stale_monitors = Vec::new();
375         for serialized in node_0_stale_monitors_serialized.iter() {
376                 let mut read = &serialized[..];
377                 let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
378                 assert!(read.is_empty());
379                 node_0_stale_monitors.push(monitor);
380         }
381
382         let mut node_0_monitors = Vec::new();
383         for serialized in node_0_monitors_serialized.iter() {
384                 let mut read = &serialized[..];
385                 let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
386                 assert!(read.is_empty());
387                 node_0_monitors.push(monitor);
388         }
389
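	// Deserializing the ChannelManager against the stale monitor copies must fail with
	// InvalidValue: the manager knows about commitment updates the monitors have not seen.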
390         let mut nodes_0_read = &nodes_0_serialized[..];
391         if let Err(msgs::DecodeError::InvalidValue) =
392                 <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
393                 default_config: UserConfig::default(),
394                 keys_manager,
395                 fee_estimator: &fee_estimator,
396                 chain_monitor: nodes[0].chain_monitor,
397                 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
398                 logger: &logger,
399                 channel_monitors: node_0_stale_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
400         }) { } else {
401                 panic!("If the monitor(s) are stale, this indicates a bug and we should get an Err return");
402         };
403
404         let mut nodes_0_read = &nodes_0_serialized[..];
405         let (_, nodes_0_deserialized_tmp) =
406                 <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
407                 default_config: UserConfig::default(),
408                 keys_manager,
409                 fee_estimator: &fee_estimator,
410                 chain_monitor: nodes[0].chain_monitor,
411                 tx_broadcaster: nodes[0].tx_broadcaster.clone(),
412                 logger: &logger,
413                 channel_monitors: node_0_monitors.iter_mut().map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect(),
414         }).unwrap();
415         nodes_0_deserialized = nodes_0_deserialized_tmp;
416         assert!(nodes_0_read.is_empty());
417
418         { // Channel close should result in a commitment tx
419                 let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
420                 assert_eq!(txn.len(), 1);
421                 check_spends!(txn[0], funding_tx);
422                 assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
423         }
424
425         for monitor in node_0_monitors.drain(..) {
426                 assert_eq!(nodes[0].chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
427                         ChannelMonitorUpdateStatus::Completed);
428                 check_added_monitors!(nodes[0], 1);
429         }
430         nodes[0].node = &nodes_0_deserialized;
431         check_closed_event!(nodes[0], 1, ClosureReason::OutdatedChannelManager);
432
433         // nodes[1] and nodes[2] have no lost state with nodes[0]...
434         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
435         reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
436         //... and we can even still claim the payment!
437         claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
438
439         nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
440         let reestablish = get_chan_reestablish_msgs!(nodes[3], nodes[0]).pop().unwrap();
441         nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
442         nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish);
443         let mut found_err = false;
444         for msg_event in nodes[0].node.get_and_clear_pending_msg_events() {
445                 if let MessageSendEvent::HandleError { ref action, .. } = msg_event {
446                         match action {
447                                 &ErrorAction::SendErrorMessage { ref msg } => {
448                                         assert_eq!(msg.channel_id, channel_id);
449                                         assert!(!found_err);
450                                         found_err = true;
451                                 },
452                                 _ => panic!("Unexpected event!"),
453                         }
454                 }
455         }
456         assert!(found_err);
457 }
458
459 fn do_test_data_loss_protect(reconnect_panicing: bool) {
460         // When we get a data_loss_protect proving we're behind, we immediately panic as the
461         // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The
462         // panic message informs the user they should force-close without broadcasting, which is tested
463         // if `reconnect_panicing` is not set.
464         let mut chanmon_cfgs = create_chanmon_cfgs(2);
465         // We broadcast during Drop because the ChannelMonitor is out of sync with the ChannelManager,
466         // which would otherwise cause a panic during signing due to the revoked tx
467         chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
468         let persister;
469         let new_chain_monitor;
470         let nodes_0_deserialized;
471         let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
472         let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
473         let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
474
475         let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, channelmanager::provided_init_features(), channelmanager::provided_init_features());
476
477         // Cache node A state before any channel update
478         let previous_node_state = nodes[0].node.encode();
479         let previous_chain_monitor_state = get_monitor!(nodes[0], chan.2).encode();
480
481         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
482         send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
483
484         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
485         nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
486
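	// Reload node A from the snapshot taken before the two payments above, i.e. simulate
	// restoring node A from a stale backup.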
487         reload_node!(nodes[0], previous_node_state, &[&previous_chain_monitor_state], persister, new_chain_monitor, nodes_0_deserialized);
488
489         if reconnect_panicing {
490                 nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
491                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
492
493                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
494
495                 // Check that B detects that A has fallen behind and, rather than force-closing the
496                 // channel, sends a warning message, giving A the possibility to recover from the
497                 // warning.
498                 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
499                 let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned();
500                 assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg));
501
502                 {
503                         let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
504                         // Node B should not broadcast a transaction to force-close the channel!
505                         assert!(node_txn.is_empty());
506                 }
507
508                 let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
509                 // Check A panics upon seeing proof it has fallen behind.
510                 nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]);
511                 return; // By this point we should have panic'ed!
512         }
513
514         nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
515         check_added_monitors!(nodes[0], 1);
516         check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
517         {
518                 let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
519                 assert_eq!(node_txn.len(), 0);
520         }
521
522         for msg in nodes[0].node.get_and_clear_pending_msg_events() {
523                 if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
524                 } else if let MessageSendEvent::HandleError { ref action, .. } = msg {
525                         match action {
526                                 &ErrorAction::SendErrorMessage { ref msg } => {
527                                         assert_eq!(msg.data, "Channel force-closed");
528                                 },
529                                 _ => panic!("Unexpected event!"),
530                         }
531                 } else {
532                         panic!("Unexpected event {:?}", msg)
533                 }
534         }
535
536         // After the warning message sent by B, A should not be able to use the channel
537         // or successfully reconnect to it.
538         assert!(nodes[0].node.list_usable_channels().is_empty());
539         nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
540         nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
541         let retry_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
542
543         nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &retry_reestablish[0]);
544         let mut err_msgs_0 = Vec::with_capacity(1);
545         for msg in nodes[0].node.get_and_clear_pending_msg_events() {
546                 if let MessageSendEvent::HandleError { ref action, .. } = msg {
547                         match action {
548                                 &ErrorAction::SendErrorMessage { ref msg } => {
549                                         assert_eq!(msg.data, "Failed to find corresponding channel");
550                                         err_msgs_0.push(msg.clone());
551                                 },
552                                 _ => panic!("Unexpected event!"),
553                         }
554                 } else {
555                         panic!("Unexpected event!");
556                 }
557         }
558         assert_eq!(err_msgs_0.len(), 1);
559         nodes[1].node.handle_error(&nodes[0].node.get_our_node_id(), &err_msgs_0[0]);
560         assert!(nodes[1].node.list_usable_channels().is_empty());
561         check_added_monitors!(nodes[1], 1);
562         check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyForceClosed { peer_msg: "Failed to find corresponding channel".to_owned() });
563         check_closed_broadcast!(nodes[1], false);
564 }
565
566 #[test]
567 #[should_panic]
568 fn test_data_loss_protect_showing_stale_state_panics() {
569         do_test_data_loss_protect(true);
570 }
571
572 #[test]
573 fn test_force_close_without_broadcast() {
574         do_test_data_loss_protect(false);
575 }
576
577 #[test]
578 fn test_forwardable_regen() {
579         // Tests that if we reload a ChannelManager while forwards are pending we will regenerate the
580         // PendingHTLCsForwardable event automatically, ensuring we don't forget to forward/receive
581         // HTLCs.
582         // We test it for both payment receipt and payment forwarding.
583
584         let chanmon_cfgs = create_chanmon_cfgs(3);
585         let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
586         let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
587         let persister: test_utils::TestPersister;
588         let new_chain_monitor: test_utils::TestChainMonitor;
589         let nodes_1_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
590         let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
591         let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
592         let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
593
594         // First send a payment to nodes[1]
595         let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
596         nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
597         check_added_monitors!(nodes[0], 1);
598
599         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
600         assert_eq!(events.len(), 1);
601         let payment_event = SendEvent::from_event(events.pop().unwrap());
602         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
603         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
604
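	// The _ignore variant asserts that a PendingHTLCsForwardable event was generated but does not
	// process the forwards, leaving the HTLC pending across the reload below.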
605         expect_pending_htlcs_forwardable_ignore!(nodes[1]);
606
607         // Next send a payment which is forwarded by nodes[1]
608         let (route_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 200_000);
609         nodes[0].node.send_payment(&route_2, payment_hash_2, &Some(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
610         check_added_monitors!(nodes[0], 1);
611
612         let mut events = nodes[0].node.get_and_clear_pending_msg_events();
613         assert_eq!(events.len(), 1);
614         let payment_event = SendEvent::from_event(events.pop().unwrap());
615         nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
616         commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
617
618         // There is already a PendingHTLCsForwardable event "pending" so another one will not be
619         // generated
620         assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
621
622         // Now restart nodes[1] and make sure it regenerates a single PendingHTLCsForwardable
623         nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
624         nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
625
626         let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode();
627         let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode();
628         reload_node!(nodes[1], nodes[1].node.encode(), &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized);
629
630         reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
631         // Note that nodes[1] and nodes[2] resend their channel_ready here since they haven't updated
632         // the commitment state.
633         reconnect_nodes(&nodes[1], &nodes[2], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
634
635         assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
636
637         expect_pending_htlcs_forwardable!(nodes[1]);
638         expect_payment_claimable!(nodes[1], payment_hash, payment_secret, 100_000);
639         check_added_monitors!(nodes[1], 1);
640
641         let mut events = nodes[1].node.get_and_clear_pending_msg_events();
642         assert_eq!(events.len(), 1);
643         let payment_event = SendEvent::from_event(events.pop().unwrap());
644         nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]);
645         commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false);
646         expect_pending_htlcs_forwardable!(nodes[2]);
647         expect_payment_claimable!(nodes[2], payment_hash_2, payment_secret_2, 200_000);
648
649         claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
650         claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
651 }
652
653 fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
654         // Test what happens if a node receives an MPP payment, claims it, but crashes before
655         // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
656         // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
657 // have the PaymentClaimable event, (b) have one (or two) channel(s) that go on chain with the
658         // HTLC preimage in them, and (c) optionally have one channel that is live off-chain but does
659         // not have the preimage tied to the still-pending HTLC.
660         //
661         // To get to the correct state, on startup we should propagate the preimage to the
662         // still-off-chain channel, claiming the HTLC as soon as the peer connects, with the monitor
663         // receiving the preimage without a state update.
664         //
665         // Further, we should generate a `PaymentClaimed` event to inform the user that the payment was
666         // definitely claimed.
667         let chanmon_cfgs = create_chanmon_cfgs(4);
668         let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
669         let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
670
671         let persister: test_utils::TestPersister;
672         let new_chain_monitor: test_utils::TestChainMonitor;
673         let nodes_3_deserialized: ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
674
675         let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
676
677         create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features());
678         create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features());
679         let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
680         let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2;
681
682         // Create an MPP route for 15k sats, more than the default htlc-max of 10%
683         let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
684         assert_eq!(route.paths.len(), 2);
685         route.paths.sort_by(|path_a, _| {
686                 // Sort the path so that the path through nodes[1] comes first
687                 if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
688                         core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
689         });
690
691         nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
692         check_added_monitors!(nodes[0], 2);
693
694         // Send the payment through to nodes[3] *without* clearing the PaymentClaimable event
695         let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
696         assert_eq!(send_events.len(), 2);
697         do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
698         do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
699
700         // Now that we have an MPP payment pending, get the pre-claim encoded copies of nodes[3]'s
701         // ChannelManager and, if we don't want to persist both monitors later, one of its monitors.
702         let mut original_monitor = test_utils::TestVecWriter(Vec::new());
703         if !persist_both_monitors {
704                 for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
705                         if outpoint.to_channel_id() == chan_id_not_persisted {
706                                 assert!(original_monitor.0.is_empty());
707                                 nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
708                         }
709                 }
710         }
711
712         let original_manager = nodes[3].node.encode();
713
714         expect_payment_claimable!(nodes[3], payment_hash, payment_secret, 15_000_000);
715
716         nodes[3].node.claim_funds(payment_preimage);
717         check_added_monitors!(nodes[3], 2);
718         expect_payment_claimed!(nodes[3], payment_hash, 15_000_000);
719
720         // Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
721         // crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
722         // with the old ChannelManager.
723         let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
724         for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
725                 if outpoint.to_channel_id() == chan_id_persisted {
726                         assert!(updated_monitor.0.is_empty());
727                         nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
728                 }
729         }
730         // If `persist_both_monitors` is set, get the second monitor here as well
731         if persist_both_monitors {
732                 for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
733                         if outpoint.to_channel_id() == chan_id_not_persisted {
734                                 assert!(original_monitor.0.is_empty());
735                                 nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
736                         }
737                 }
738         }
739
740         // Now restart nodes[3].
741         reload_node!(nodes[3], original_manager, &[&updated_monitor.0, &original_monitor.0], persister, new_chain_monitor, nodes_3_deserialized);
742
743         // On startup the preimage should have been copied into the non-persisted monitor:
744         assert!(get_monitor!(nodes[3], chan_id_persisted).get_stored_preimages().contains_key(&payment_hash));
745         assert!(get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash));
746
747         nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
748         nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
749
750         // During deserialization, we should have closed one channel and broadcast its latest
751         // commitment transaction. We should also still have the original PaymentClaimable event we
752         // never finished processing.
753         let events = nodes[3].node.get_and_clear_pending_events();
754         assert_eq!(events.len(), if persist_both_monitors { 4 } else { 3 });
755         if let Event::PaymentClaimable { amount_msat: 15_000_000, .. } = events[0] { } else { panic!(); }
756         if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
757         if persist_both_monitors {
758                 if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
759         }
760
761         // On restart, we should also get a duplicate PaymentClaimed event as we persisted the
762         // ChannelManager prior to handling the original one.
763         if let Event::PaymentClaimed { payment_hash: our_payment_hash, amount_msat: 15_000_000, .. } =
764                 events[if persist_both_monitors { 3 } else { 2 }]
765         {
766                 assert_eq!(payment_hash, our_payment_hash);
767         } else { panic!(); }
768
769         assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 });
770         if !persist_both_monitors {
771                 // If one of the two channels is still live, reveal the payment preimage over it.
772
773                 nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
774                 let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
775                 nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
776                 let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
777
778                 nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
779                 get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
780                 assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
781
782                 nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]);
783
784                 // Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
785                 // claim should fly.
786                 let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
787                 check_added_monitors!(nodes[3], 1);
788                 assert_eq!(ds_msgs.len(), 2);
789                 if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
790
791                 let cs_updates = match ds_msgs[1] {
792                         MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
793                                 nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
794                                 check_added_monitors!(nodes[2], 1);
795                                 let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
796                                 expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
797                                 commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
798                                 cs_updates
799                         }
800                         _ => panic!(),
801                 };
802
803                 nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
804                 commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
805                 expect_payment_sent!(nodes[0], payment_preimage);
806         }
807 }
808
809 #[test]
810 fn test_partial_claim_before_restart() {
811         do_test_partial_claim_before_restart(false);
812         do_test_partial_claim_before_restart(true);
813 }