+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+}
+
+fn do_outbound_update_no_early_closing_signed(use_htlc: bool) {
+ // Previously, if we have a pending inbound HTLC (or fee update) on a channel which has
+ // initiated shutdown, we'd send our initial closing_signed immediately after receiving the
+ // peer's last RAA to remove the HTLC/fee update, but before receiving their final
+ // commitment_signed for a commitment without the HTLC/with the new fee. This caused at least
+ // LDK peers to force-close as we initiated closing_signed prior to the channel actually being
+ // fully empty of pending updates/HTLCs.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+ send_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+ let payment_hash_opt = if use_htlc {
+ Some(route_payment(&nodes[1], &[&nodes[0]], 10_000).1)
+ } else {
+ None
+ };
+
+ if use_htlc {
+ nodes[0].node.fail_htlc_backwards(&payment_hash_opt.unwrap());
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[0],
+ [HTLCDestination::FailedPayment { payment_hash: payment_hash_opt.unwrap() }]);
+ } else {
+ *chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() *= 10;
+ nodes[0].node.timer_tick_occurred();
+ }
+ let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+ check_added_monitors(&nodes[0], 1);
+
+ nodes[1].node.close_channel(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown);
+
+ if use_htlc {
+ nodes[1].node.handle_update_fail_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ } else {
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &updates.update_fee.unwrap());
+ }
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
+ check_added_monitors(&nodes[1], 1);
+ let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
+ check_added_monitors(&nodes[0], 1);
+
+ // At this point the Channel on nodes[0] has no record of any HTLCs but the latest
+ // broadcastable commitment does contain the HTLC (but only the ChannelMonitor knows this).
+ // Thus, the channel should not yet initiate closing_signed negotiation (but previously did).
+ assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
+
+ chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs);
+ check_added_monitors(&nodes[0], 1);
+ assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
+
+ expect_channel_shutdown_state!(nodes[0], chan_id, ChannelShutdownState::ResolvingHTLCs);
+ assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());
+ let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+ nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+
+ let as_raa_closing_signed = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(as_raa_closing_signed.len(), 2);
+
+ if let MessageSendEvent::SendRevokeAndACK { msg, .. } = &as_raa_closing_signed[0] {
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &msg);
+ check_added_monitors(&nodes[1], 1);
+ if use_htlc {
+ expect_payment_failed!(nodes[1], payment_hash_opt.unwrap(), true);
+ }
+ } else { panic!("Unexpected message {:?}", as_raa_closing_signed[0]); }
+
+ if let MessageSendEvent::SendClosingSigned { msg, .. } = &as_raa_closing_signed[1] {
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &msg);
+ } else { panic!("Unexpected message {:?}", as_raa_closing_signed[1]); }
+
+ let bs_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &bs_closing_signed);
+ let (_, as_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &as_2nd_closing_signed.unwrap());
+ let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ assert!(node_1_none.is_none());
+
+ check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+}
+
#[test]
fn outbound_update_no_early_closing_signed() {
	// Exercise both variants: a pending HTLC failure first, then a pending fee update.
	for &use_htlc in &[true, false] {
		do_outbound_update_no_early_closing_signed(use_htlc);
	}
}
+
+#[test]
+fn batch_funding_failure() {
+ // Provides test coverage of batch funding failure, which previously deadlocked
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let temp_chan_id_a = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+ let temp_chan_id_b = exchange_open_accept_chan(&nodes[0], &nodes[2], 1_000_000, 0);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ // Build a transaction which only has the output for one of the two channels we're trying to
+ // confirm. Previously this led to a deadlock in channel closure handling.
+ let mut tx = Transaction { version: 2, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
+ let mut chans = Vec::new();
+ for (idx, ev) in events.iter().enumerate() {
+ if let Event::FundingGenerationReady { temporary_channel_id, counterparty_node_id, output_script, .. } = ev {
+ if idx == 0 {
+ tx.output.push(TxOut { value: 1_000_000, script_pubkey: output_script.clone() });
+ }
+ chans.push((temporary_channel_id, counterparty_node_id));
+ } else { panic!(); }
+ }
+
+ let err = "Error in transaction funding: Misuse error: No output matched the script_pubkey and value in the FundingGenerationReady event".to_string();
+ let temp_err = "No output matched the script_pubkey and value in the FundingGenerationReady event".to_string();
+ let post_funding_chan_id_a = ChannelId::v1_from_funding_txid(tx.txid().as_ref(), 0);
+ let close = [
+ ExpectedCloseEvent::from_id_reason(post_funding_chan_id_a, true, ClosureReason::ProcessingError { err: err.clone() }),
+ ExpectedCloseEvent::from_id_reason(temp_chan_id_b, false, ClosureReason::ProcessingError { err: temp_err }),
+ ];
+
+ nodes[0].node.batch_funding_transaction_generated(&chans, tx).unwrap_err();
+
+ let msgs = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msgs.len(), 3);
+ // We currently spuriously send `FundingCreated` for the first channel and then immediately
+ // fail both channels, which isn't ideal but should be fine.
+ assert!(msgs.iter().any(|msg| {
+ if let MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id, .. }, ..
+ }, .. } = msg {
+ *channel_id == temp_chan_id_b
+ } else { false }
+ }));
+ let funding_created_pos = msgs.iter().position(|msg| {
+ if let MessageSendEvent::SendFundingCreated { msg: msgs::FundingCreated { temporary_channel_id, .. }, .. } = msg {
+ assert_eq!(*temporary_channel_id, temp_chan_id_a);
+ true
+ } else { false }
+ }).unwrap();
+ let funded_channel_close_pos = msgs.iter().position(|msg| {
+ if let MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id, .. }, ..
+ }, .. } = msg {
+ *channel_id == post_funding_chan_id_a
+ } else { false }
+ }).unwrap();
+
+ // The error message uses the funded channel_id so must come after the funding_created
+ assert!(funded_channel_close_pos > funding_created_pos);
+
+ check_closed_events(&nodes[0], &close);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);