use bitcoin::blockdata::constants::genesis_block;
use bitcoin::hash_types::BlockHash;
-use bitcoin::network::constants::Network;
+use bitcoin::network::Network;
use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor};
use crate::chain::transaction::OutPoint;
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
use crate::ln::msgs;
+use crate::ln::types::ChannelId;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::test_channel_signer::TestChannelSigner;
use crate::util::errors::APIError;
assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_1, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
}
// ...and make sure we can force-close a frozen channel
- nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ let error_message = "Channel force-closed";
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
// PaymentPathFailed event
assert_eq!(nodes[0].node.list_channels().len(), 0);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_2, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(our_payment_secret, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ // As long as the preimage isn't on-chain, we shouldn't expose the `PaymentClaimed` event to
+ // users nor send the preimage to peers in the new commitment update.
nodes[1].node.claim_funds(payment_preimage_1);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
assert_eq!(via_channel_id, Some(channel_id));
assert_eq!(via_user_channel_id, Some(42));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_2, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id());
assert_eq!(via_channel_id, Some(channel_id));
match &purpose {
- PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
+ PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => {
assert!(payment_preimage.is_none());
assert_eq!(payment_secret_3, *payment_secret);
},
- _ => panic!("expected PaymentPurpose::InvoicePayment")
+ _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment")
}
},
_ => panic!("Unexpected event"),
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
- let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+ let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors!(nodes[1], 1);
send_payment(&nodes[0], &[&nodes[1]], 8000000);
close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
}
#[test]
assert_eq!(events.len(), 1);
pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None);
- claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage);
+ claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage)
+ );
}
#[test]
assert_eq!(txn_a, txn_b);
assert_eq!(txn_a.len(), 1);
check_spends!(txn_a[0], funding_tx);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
- check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
}
#[test]
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
let events = nodes[1].node.get_and_clear_pending_events();
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None).unwrap();
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
let events = nodes[1].node.get_and_clear_pending_events();
let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
+ let error_message = "Channel force-closed";
if close_chans_before_reload {
if !close_only_a {
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
- nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[2].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[2].node.get_our_node_id()], 100000);
}
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
- nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
- check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100000);
}
// Now reload node B
assert_eq!(bs_close_txn.len(), 3);
}
}
+ let error_message = "Channel force-closed";
- nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100000);
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000);
let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(as_closing_tx.len(), 1);
do_test_durable_preimages_on_closed_channel(false, false, false);
}
+#[test]
+fn test_sync_async_persist_doesnt_hang() {
+ // Previously, we checked whether a channel was a candidate for making forward progress based
+ // on whether the `MonitorEvent::Completed` matched the channel's latest monitor update id.
+ // However, this could lead to a rare race when `ChannelMonitor`s were persisted both
+ // synchronously and asynchronously, leading to channel hangs.
+ //
+ // To hit this case, we need to generate a `MonitorEvent::Completed` prior to a new channel
+ // update, but which is only processed after the channel update.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+ // Send two payments from A to B, then claim the first, marking the very last
+ // ChannelMonitorUpdate as InProgress...
+ let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+
+ nodes[1].node.claim_funds(payment_preimage_1);
+ check_added_monitors(&nodes[1], 1);
+ expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
+
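+ // Deliver B's fulfill and commitment for the first payment to A, then walk through the
+ // revoke-and-ack/commitment_signed dance up to B's final RAA.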
+ let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+ expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
+ check_added_monitors(&nodes[0], 1);
+ let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
+ check_added_monitors(&nodes[1], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs);
+ check_added_monitors(&nodes[1], 1);
+
+ let bs_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
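+ // Mark A's next monitor persistence as in-progress so the update generated by handling B's
+ // final RAA remains pending (the asynchronously-persisted update).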
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_final_raa);
+ check_added_monitors(&nodes[0], 1);
+
+ // Immediately complete the monitor update, but before the ChannelManager has a chance to see
+ // the MonitorEvent::Completed, create a channel update by receiving a claim on the second
+ // payment.
+ let (outpoint, _, ab_update_id) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+ nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap();
+
+ nodes[1].node.claim_funds(payment_preimage_2);
+ check_added_monitors(&nodes[1], 1);
+ expect_payment_claimed!(nodes[1], payment_hash_2, 1_000_000);
+
+ let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed);
+ check_added_monitors(&nodes[0], 1);
+
+ // At this point, we have completed an extra `ChannelMonitorUpdate` but the `ChannelManager`
+ // hasn't yet seen our `MonitorEvent::Completed`. When we call
+ // `get_and_clear_pending_msg_events` here, the `ChannelManager` finally sees that event and
+ // should return the channel to normal operation.
+ let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ // Now that we've completed our test, process the events we have queued up (which we were not
+ // able to check until now as they would have caused the `ChannelManager` to look at the
+ // pending `MonitorEvent`s).
+ let pending_events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(pending_events.len(), 2);
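+ // The `PaymentSent` for the second payment was queued when A processed B's fulfill above,
+ // while the `PaymentPathSuccessful` for the first was queued when the `ChannelManager`
+ // processed the `MonitorEvent::Completed`.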
+ if let Event::PaymentPathSuccessful { ref payment_hash, ..} = pending_events[1] {
+ assert_eq!(payment_hash.unwrap(), payment_hash_1);
+ } else { panic!(); }
+ if let Event::PaymentSent { ref payment_hash, ..} = pending_events[0] {
+ assert_eq!(*payment_hash, payment_hash_2);
+ } else { panic!(); }
+
+ // Finally, complete the claiming of the second payment
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
+ check_added_monitors(&nodes[1], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs);
+ check_added_monitors(&nodes[1], 1);
+
+ let bs_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_final_raa);
+ check_added_monitors(&nodes[0], 1);
+ expect_payment_path_successful!(nodes[0]);
+}
+
fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
// Test that if a `ChannelMonitorUpdate` completes but a `ChannelManager` isn't serialized
// before restart we run the monitor update completion action on startup.
let manager_b = nodes[1].node.encode();
reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+ let error_message = "Channel force-closed";
if close_during_reload {
// Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
// (as learned about during the on-reload block connection).
- nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100_000);
let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
}
let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().2;
let mut events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
- expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), close_during_reload, false);
+ expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000),
+ None, close_during_reload, false, false);
if close_during_reload {
match events[0] {
Event::ChannelClosed { .. } => {},
do_test_reload_mon_update_completion_actions(true);
do_test_reload_mon_update_completion_actions(false);
}
+
+fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
+ // Test that if a peer manages to send an `update_fulfill_htlc` message without a
+ // `commitment_signed`, disconnects, then replays the `update_fulfill_htlc` message, it doesn't
+ // result in a channel hang. This was previously broken: the `DuplicateClaim` case wasn't
+ // handled when claiming an HTLC, and handling for it wasn't added when monitor update
+ // completion actions (which must always complete at some point) were introduced.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 1, 2);
+
+ // Route a payment from A, through B, to C, then claim it on C. Replay the
+ // `update_fulfill_htlc` twice on B to check that B doesn't hang.
+ let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+ nodes[2].node.claim_funds(payment_preimage);
+ check_added_monitors(&nodes[2], 1);
+ expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
+
+ let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
+ if hold_chan_a {
+ // The first monitor update will be on the A <-> B channel (writing the preimage upstream);
+ // hold it so it stays in-progress.
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ }
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+ check_added_monitors(&nodes[1], 1);
+
+ if !hold_chan_a {
+ let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
+ expect_payment_sent!(&nodes[0], payment_preimage);
+ }
+
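+ // Disconnect and reconnect B and C. B never received C's `commitment_signed`, so on
+ // reconnect C replays its `update_fulfill_htlc`.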
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+
+ let mut reconnect = ReconnectArgs::new(&nodes[1], &nodes[2]);
+ reconnect.pending_htlc_claims = (1, 0);
+ reconnect_nodes(reconnect);
+
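+ // B has now seen the same `update_fulfill_htlc` twice (a duplicate claim); ensure its
+ // channels still make progress.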
+ if !hold_chan_a {
+ expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+ } else {
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[1], nodes[2], 1_000_000);
+
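+ // The send should be accepted and simply queued: no monitor update, events, or messages are
+ // generated while the A <-> B monitor update is still pending, but nothing hangs or errors.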
+ nodes[1].node.send_payment_with_route(&route, payment_hash_2,
+ RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+ check_added_monitors(&nodes[1], 0);
+
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ }
+}
+
+#[test]
+fn test_glacial_peer_cant_hang() {
+ do_test_glacial_peer_cant_hang(false);
+ do_test_glacial_peer_cant_hang(true);
+}