+
+#[test]
+fn batch_funding_failure() {
+ // Provides test coverage of batch funding failure, which previously deadlocked
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ let temp_chan_id_a = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
+ let temp_chan_id_b = exchange_open_accept_chan(&nodes[0], &nodes[2], 1_000_000, 0);
+
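+ // One `FundingGenerationReady` event should now be pending on node A for each of the two
+ // channels.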
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ // Build a transaction which only has the output for one of the two channels we're trying to
+ // fund. Previously this led to a deadlock in channel closure handling.
+ let mut tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: Vec::new() };
+ let mut chans = Vec::new();
+ for (idx, ev) in events.iter().enumerate() {
+ if let Event::FundingGenerationReady { temporary_channel_id, counterparty_node_id, output_script, .. } = ev {
+ if idx == 0 {
+ tx.output.push(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: output_script.clone() });
+ }
+ chans.push((temporary_channel_id, counterparty_node_id));
+ } else { panic!(); }
+ }
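+ // `chans` now holds both (temporary_channel_id, counterparty_node_id) pairs, but `tx` only
+ // contains the funding output for the first channel.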
+
+ let err = "Error in transaction funding: Misuse error: No output matched the script_pubkey and value in the FundingGenerationReady event".to_string();
+ let temp_err = "No output matched the script_pubkey and value in the FundingGenerationReady event".to_string();
+ let post_funding_chan_id_a = ChannelId::v1_from_funding_txid(tx.txid().as_ref(), 0);
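+ // Channel A gets far enough for its funding-derived channel_id to be assigned (we send
+ // `FundingCreated` for it, see below), so it closes under that id; channel B never gets a
+ // funding output and closes under its temporary id.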
+ let close = [
+ ExpectedCloseEvent::from_id_reason(post_funding_chan_id_a, true, ClosureReason::ProcessingError { err: err.clone() }),
+ ExpectedCloseEvent::from_id_reason(temp_chan_id_b, false, ClosureReason::ProcessingError { err: temp_err }),
+ ];
+
+ nodes[0].node.batch_funding_transaction_generated(&chans, tx).unwrap_err();
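+ // Batch funding is all-or-nothing: the missing output for channel B fails the entire batch,
+ // closing both channels.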
+
+ let msgs = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msgs.len(), 3);
+ // We currently spuriously send `FundingCreated` for the first channel and then immediately
+ // fail both channels, which isn't ideal but should be fine.
+ assert!(msgs.iter().any(|msg| {
+ if let MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id, .. }, ..
+ }, .. } = msg {
+ *channel_id == temp_chan_id_b
+ } else { false }
+ }));
+ let funding_created_pos = msgs.iter().position(|msg| {
+ if let MessageSendEvent::SendFundingCreated { msg: msgs::FundingCreated { temporary_channel_id, .. }, .. } = msg {
+ assert_eq!(*temporary_channel_id, temp_chan_id_a);
+ true
+ } else { false }
+ }).unwrap();
+ let funded_channel_close_pos = msgs.iter().position(|msg| {
+ if let MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id, .. }, ..
+ }, .. } = msg {
+ *channel_id == post_funding_chan_id_a
+ } else { false }
+ }).unwrap();
+
+ // The error message uses the funded channel_id, so it must come after the `FundingCreated`
+ // message which assigns that id.
+ assert!(funded_channel_close_pos > funding_created_pos);
+
+ check_closed_events(&nodes[0], &close);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+}
+
+#[test]
+fn test_force_closure_on_low_stale_fee() {
+ // Check that we force-close channels if their feerate is too low and has gone stale (i.e. the
+ // counterparty hasn't sent a fee update in a long time).
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+ // Start by connecting lots of blocks to give LDK some feerate history
+ for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS * 2 {
+ connect_blocks(&nodes[1], 1);
+ }
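+ // The tracked feerate history is now fully populated at the initial feerate.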
+
+ // Now connect a handful of blocks with a "high" feerate
+ {
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock *= 2;
+ }
+ for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+ connect_blocks(&nodes[1], 1);
+ }
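+ // With only FEERATE_TRACKING_BLOCKS - 1 blocks at the new feerate, the minimum over the
+ // tracked window still includes the old feerate, so no force-closure happens yet.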
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Now, note that while one more block at the high feerate would have caused us to force-close,
+ // it won't, because we've dropped the feerate back down
+ {
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock /= 2;
+ }
+ connect_blocks(&nodes[1], super::channelmanager::FEERATE_TRACKING_BLOCKS as u32 * 2);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Now connect another FEERATE_TRACKING_BLOCKS - 1 blocks at a high feerate. None of these
+ // will cause a force-closure because LDK only looks at the minimum feerate over the last
+ // FEERATE_TRACKING_BLOCKS blocks.
+ {
+ let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+ *feerate_lock *= 2;
+ }
+
+ for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+ connect_blocks(&nodes[1], 1);
+ }
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ // Finally, connect one more block and check the force-close happened.
+ connect_blocks(&nodes[1], 1);
+ check_added_monitors!(nodes[1], 1);
+ check_closed_broadcast(&nodes[1], 1, true);
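+ // 253 sat/kW is the test harness' default feerate, which the channel is still using; the
+ // required feerate is twice that after the final doubling above.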
+ let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, required_feerate_sat_per_kw: 253 * 2 };
+ check_closed_events(&nodes[1], &[ExpectedCloseEvent::from_id_reason(chan_id, false, reason)]);
+}