+
+#[test]
+fn test_path_paused_mpp() {
+ // Simple test of sending a multi-part payment where one path is currently blocked awaiting
+ // a monitor update.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
+ let logger = test_utils::TestLogger::new();
+
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[3]);
+ let mut route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
+
+ // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3:
+ let path = route.paths[0].clone();
+ route.paths.push(path);
+ route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
+ route.paths[0][0].short_channel_id = chan_1_id;
+ route.paths[0][1].short_channel_id = chan_3_id;
+ route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
+ route.paths[1][0].short_channel_id = chan_2_ann.contents.short_channel_id;
+ route.paths[1][1].short_channel_id = chan_4_id;
+
+ // Set it so that the first monitor update (for the path 0 -> 1 -> 3) succeeds, but the second
+ // (for the path 0 -> 2 -> 3) fails.
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
+ *nodes[0].chain_monitor.next_update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+
+ // Now check that we get the right return value, indicating that the first path succeeded but
+ // the second got a MonitorUpdateFailed err. Because some paths succeeded, the overall result
+ // is a PaymentSendFailure::PartialFailure, which prevents a retry.
+ if let Err(PaymentSendFailure::PartialFailure(results)) = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)) {
+ assert_eq!(results.len(), 2);
+ if let Ok(()) = results[0] {} else { panic!(); }
+ if let Err(APIError::MonitorUpdateFailed) = results[1] {} else { panic!(); }
+ } else { panic!(); }
+ check_added_monitors!(nodes[0], 2);
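+ // Allow subsequent monitor updates to succeed again so the paused path can be resumed once we
+ // notify the ChannelManager below.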
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Ok(()));
+
+ // Pass the first HTLC of the payment along to nodes[3].
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None);
+
+ // And check that, after we successfully update the monitor for chan_2, we can pass the second
+ // HTLC along to nodes[3] and claim the whole payment back to nodes[0].
+ let (outpoint, latest_update) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&outpoint, latest_update);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
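+ // This is the final path, so nodes[3] should now see the full 200k msat across both HTLCs
+ // (hence the 200_000 expected-amount and `true` payment-received arguments).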
+ pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
+
+ claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+}
+
+fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
+ // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we
+ // properly free them on reconnect. We previously failed such HTLCs upon serialization, but
+ // that behavior was both somewhat unexpected and broken (there was a debug assertion which
+ // failed in such a case).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let persister: test_utils::TestPersister;
+ let new_chain_monitor: test_utils::TestChainMonitor;
+ let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000, InitFeatures::known(), InitFeatures::known()).2;
+ let (payment_preimage_1, payment_hash_1, payment_secret_1) = get_payment_preimage_hash!(&nodes[1]);
+ let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
+
+ // Do a really complicated dance to get an HTLC into the holding cell, with MonitorUpdateFailed
+ // set but AwaitingRemoteRevoke unset. When this test was written, any attempt to send an HTLC
+ // while MonitorUpdateFailed was set was immediately failed backwards. Thus, the only way to
+ // get an AddHTLC into the holding cell is to add it while AwaitingRemoteRevoke is set but
+ // MonitorUpdateFailed is unset, and then swap the flags.
+ //
+ // We do this by:
+ // a) routing a payment from node B to node A,
+ // b) sending a payment from node A to node B without delivering any of the generated messages,
+ // putting node A in AwaitingRemoteRevoke,
+ // c) sending a second payment from node A to node B, which is immediately placed in the
+ // holding cell,
+ // d) claiming the first payment from B, allowing us to fail the monitor update which occurs
+ // when we try to persist the payment preimage,
+ // e) delivering A's commitment_signed from (b) and the resulting B revoke_and_ack message,
+ // clearing AwaitingRemoteRevoke on node A.
+ //
+ // Note that because, at the end, MonitorUpdateFailed is still set, the HTLC generated in (c)
+ // will not be freed from the holding cell.
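+ // (a) Route a payment from node B (nodes[1]) to node A (nodes[0]) so A has a preimage to
+ // claim later: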
+ let (payment_preimage_0, _, _) = route_payment(&nodes[1], &[&nodes[0]], 100000);
+
+ let route = {
+ let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+ get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, None, &Vec::new(), 100000, TEST_FINAL_CLTV, nodes[0].logger).unwrap()
+ };
+
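+ // (b) Send a payment from A to B without delivering any of the generated messages, putting A
+ // in AwaitingRemoteRevoke: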
+ nodes[0].node.send_payment(&route, payment_hash_1, &Some(payment_secret_1)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let send = SendEvent::from_node(&nodes[0]);
+ assert_eq!(send.msgs.len(), 1);
+
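+ // (c) Send the second A -> B payment; it goes straight into the holding cell since
+ // AwaitingRemoteRevoke is set, so no monitor update occurs: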
+ nodes[0].node.send_payment(&route, payment_hash_2, &Some(payment_secret_2)).unwrap();
+ check_added_monitors!(nodes[0], 0);
+
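+ // (d) Fail nodes[0]'s next monitor update, then have A claim the first payment; persisting
+ // the preimage hits the failure and sets MonitorUpdateFailed: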
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
+ assert!(nodes[0].node.claim_funds(payment_preimage_0));
+ check_added_monitors!(nodes[0], 1);
+
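+ // (e) Deliver A's update_add_htlc and commitment_signed from (b), then B's revoke_and_ack,
+ // clearing AwaitingRemoteRevoke on A: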
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send.msgs[0]);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send.commitment_msg);
+ check_added_monitors!(nodes[1], 1);
+
+ let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+ check_added_monitors!(nodes[0], 1);
+
+ if disconnect {
+ // Optionally reload nodes[0] entirely through a serialization roundtrip, otherwise just
+ // disconnect the peers. Note that the fuzzer originally found this issue because
+ // deserializing a ChannelManager in this state causes an assertion failure.
+ if reload_a {
+ let nodes_0_serialized = nodes[0].node.encode();
+ let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+ nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
+
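+ // Recreate the persister and ChainMonitor, then deserialize the ChannelMonitor and
+ // ChannelManager from the bytes written above: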
+ persister = test_utils::TestPersister::new();
+ let keys_manager = &chanmon_cfgs[0].keys_manager;
+ new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), nodes[0].logger, node_cfgs[0].fee_estimator, &persister, keys_manager);
+ nodes[0].chain_monitor = &new_chain_monitor;
+ let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+ let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(
+ &mut chan_0_monitor_read, keys_manager).unwrap();
+ assert!(chan_0_monitor_read.is_empty());
+
+ let mut nodes_0_read = &nodes_0_serialized[..];
+ let config = UserConfig::default();
+ nodes_0_deserialized = {
+ let mut channel_monitors = HashMap::new();
+ channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
+ <(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ default_config: config,
+ keys_manager,
+ fee_estimator: node_cfgs[0].fee_estimator,
+ chain_monitor: nodes[0].chain_monitor,
+ tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+ logger: nodes[0].logger,
+ channel_monitors,
+ }).unwrap().1
+ };
+ nodes[0].node = &nodes_0_deserialized;
+ assert!(nodes_0_read.is_empty());
+
+ nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0.clone(), chan_0_monitor).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ } else {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ }
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ // Now reconnect the two peers
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ assert_eq!(reestablish_1.len(), 1);
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty() });
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ assert_eq!(reestablish_2.len(), 1);
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+ let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ check_added_monitors!(nodes[1], 0);
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+ let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+
+ assert!(resp_0.0.is_none());
+ assert!(resp_0.1.is_none());
+ assert!(resp_0.2.is_none());
+ assert!(resp_1.0.is_none());
+ assert!(resp_1.1.is_none());
+
+ // Check that the freshly-generated cs is equal to the original (which we will deliver in a
+ // moment).
+ if let Some(pending_cs) = resp_1.2 {
+ assert!(pending_cs.update_add_htlcs.is_empty());
+ assert!(pending_cs.update_fail_htlcs.is_empty());
+ assert!(pending_cs.update_fulfill_htlcs.is_empty());
+ assert_eq!(pending_cs.commitment_signed, cs);
+ } else { panic!(); }
+
+ // There should be no monitor updates as we are still waiting on the previously-failed monitor
+ // update to complete.
+ check_added_monitors!(nodes[0], 0);
+ check_added_monitors!(nodes[1], 0);
+ }
+
+ // If we finish updating the monitor, we should free the holding cell right away (this did
+ // not occur prior to #756).
+ *nodes[0].chain_monitor.update_ret.lock().unwrap() = None;
+ let (funding_txo, mon_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+ nodes[0].node.channel_monitor_updated(&funding_txo, mon_id);
+
+ // New outbound messages should be generated immediately upon a call to
+ // get_and_clear_pending_msg_events (but not before).
+ check_added_monitors!(nodes[0], 0);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ check_added_monitors!(nodes[0], 1);
+ assert_eq!(events.len(), 1);
+
+ // Deliver the pending in-flight CS
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &cs);
+ check_added_monitors!(nodes[0], 1);
+
+ let commitment_msg = match events.pop().unwrap() {
+ MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ expect_payment_sent!(nodes[1], payment_preimage_0);
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+ updates.commitment_signed
+ },
+ _ => panic!("Unexpected event type!"),
+ };
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_msg);
+ check_added_monitors!(nodes[1], 1);
+
+ let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_1, payment_secret_1, 100000);
+ check_added_monitors!(nodes[1], 1);
+
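+ // Complete the commitment_signed dance to fully settle the updates delivered above.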
+ commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, payment_secret_2, 100000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+#[test]
+fn channel_holding_cell_serialize() {
+ do_channel_holding_cell_serialize(true, true);
+ do_channel_holding_cell_serialize(true, false);
+ do_channel_holding_cell_serialize(false, true); // last arg doesn't matter
+}
+
+#[derive(PartialEq)]
+enum HTLCStatusAtDupClaim {
+ Received,
+ HoldingCell,
+ Cleared,
+}
+fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_fails: bool) {
+ // When receiving an update_fulfill_htlc message, we immediately forward the claim backwards
+ // along the payment path before waiting for a full commitment_signed dance. This is great, but
+ // can cause duplicative claims if a node sends an update_fulfill_htlc message, disconnects,
+ // reconnects, and then has to re-send its update_fulfill_htlc message. Previously, we didn't
+ // handle the double-claim correctly, spuriously closing the channel on which the inbound HTLC
+ // was received.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()).2;
+
+ let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+
+ let mut as_raa = None;
+ if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+ // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
+ // awaiting a remote revoke_and_ack from nodes[0].
+ let (_, second_payment_hash, second_payment_secret) = get_payment_preimage_hash!(nodes[1]);
+ let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph.read().unwrap(),
+ &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 100_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap();
+ nodes[0].node.send_payment(&route, second_payment_hash, &Some(second_payment_secret)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+ check_added_monitors!(nodes[1], 1);
+
+ let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+ check_added_monitors!(nodes[0], 1);
+
+ as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+ }
+
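+ // Build the update_fulfill_htlc we'll deliver to nodes[1] by hand so we can deliver it even
+ // in the second_fails case, where nodes[2] fails the HTLC backwards instead of claiming it: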
+ let fulfill_msg = msgs::UpdateFulfillHTLC {
+ channel_id: chan_2,
+ htlc_id: 0,
+ payment_preimage,
+ };
+ if second_fails {
+ assert!(nodes[2].node.fail_htlc_backwards(&payment_hash));
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+ get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ } else {
+ assert!(nodes[2].node.claim_funds(payment_preimage));
+ check_added_monitors!(nodes[2], 1);
+ let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1);
+ // Check that the message we're about to deliver matches the one generated:
+ assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]);
+ }
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &fulfill_msg);
+ expect_payment_forwarded!(nodes[1], Some(1000), false);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut bs_updates = None;
+ if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
+ bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+ assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+ expect_payment_sent!(nodes[0], payment_preimage);
+ if htlc_status == HTLCStatusAtDupClaim::Cleared {
+ commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+ }
+ } else {
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
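+ // Disconnect and reconnect nodes[1] and nodes[2], forcing nodes[2] to re-send its fail or
+ // claim on reconnect (the tuple arguments tell reconnect_nodes which pending HTLC fails and
+ // claims to expect on each side).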
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+ if second_fails {
+ reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ } else {
+ reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
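+ // In the HoldingCell case, deliver the held RAA now, freeing nodes[1]'s holding cell and
+ // letting the (duplicate) fulfill finally go out to nodes[0]: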
+ if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa.unwrap());
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it
+
+ bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
+ assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+ expect_payment_sent!(nodes[0], payment_preimage);
+ }
+ if htlc_status != HTLCStatusAtDupClaim::Cleared {
+ commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+ }
+}
+
+#[test]
+fn test_reconnect_dup_htlc_claims() {
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, false);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, false);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, false);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Received, true);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::HoldingCell, true);
+ do_test_reconnect_dup_htlc_claims(HTLCStatusAtDupClaim::Cleared, true);
+}