Merge pull request #3144 from TheBlueMatt/2024-06-message-flags
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
index 2db859360d870f75b79fe94261c1074630354bd5..8f3121a8673a05b1fbf6bafabb21afb109181171 100644
--- a/lightning/src/ln/shutdown_tests.rs
+++ b/lightning/src/ln/shutdown_tests.rs
@@ -291,7 +291,7 @@ fn close_on_unfunded_channel() {
        let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 
        nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap();
-       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1_000_000);
+       check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) }, [nodes[1].node.get_our_node_id()], 1_000_000);
 }
 
 #[test]
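
Note on the `ClosureReason` change: this hunk and the `expect_channel_shutdown_state_with_force_closure` hunk below turn `ClosureReason::HolderForceClosed` from a unit variant into a struct variant carrying `broadcasted_latest_txn: Option<bool>`, recording whether a commitment transaction was actually broadcast. A minimal sketch of how a consumer might read the new field; the variant shape comes from this diff, the helper itself is illustrative:

```rust
use lightning::events::ClosureReason;

// Illustrative helper, not LDK API: summarize a holder-initiated force-closure.
fn holder_close_summary(reason: &ClosureReason) -> &'static str {
	match reason {
		// Some(false): nothing was broadcast, e.g. the channel was never
		// funded (as asserted in close_on_unfunded_channel above).
		ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) } =>
			"force-closed, nothing broadcast",
		// Some(true): we broadcast our latest commitment transaction.
		ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) } =>
			"force-closed, latest commitment tx broadcast",
		// None: the event was serialized by a version predating the field.
		ClosureReason::HolderForceClosed { broadcasted_latest_txn: None } =>
			"force-closed, broadcast status unknown",
		_ => "closed for another reason",
	}
}
```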
@@ -302,11 +302,12 @@ fn expect_channel_shutdown_state_with_force_closure() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+       let error_message = "Channel force-closed";
 
        expect_channel_shutdown_state!(nodes[0], chan_1.2, ChannelShutdownState::NotShuttingDown);
        expect_channel_shutdown_state!(nodes[1], chan_1.2, ChannelShutdownState::NotShuttingDown);
 
-       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap();
+       nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
        check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
 
@@ -323,7 +324,7 @@ fn expect_channel_shutdown_state_with_force_closure() {
        assert!(nodes[1].node.list_channels().is_empty());
        check_closed_broadcast!(nodes[0], true);
        check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed, [nodes[1].node.get_our_node_id()], 100000);
-       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+       check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
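
Note on the `force_close_broadcasting_latest_txn` change above: the method now takes a third, required argument, a human-readable reason for the closure (per the upstream API change this string is sent to the counterparty in the resulting `error` message; that delivery is not visible in this diff and should be treated as an assumption here). The call shape, annotated:

```rust
// Reuses the test's own variables; only the String argument is new.
nodes[1].node.force_close_broadcasting_latest_txn(
	&chan_1.2,                          // channel id to force-close
	&nodes[0].node.get_our_node_id(),   // counterparty's node id
	"Channel force-closed".to_string(), // closure reason, new in this signature
).unwrap();
```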
@@ -576,7 +577,7 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) {
                node_0_2nd_shutdown
        } else {
                let node_0_chan_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id());
-               assert_eq!(node_0_chan_update.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+               assert_eq!(node_0_chan_update.contents.channel_flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
                nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown);
                get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
        };
@@ -1187,7 +1188,7 @@ fn do_test_closing_signed_reinit_timeout(timeout_step: TimeoutStep) {
                assert_eq!(events.len(), 1);
                match events[0] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
-                               assert_eq!(msg.contents.flags & 2, 2);
+                               assert_eq!(msg.contents.channel_flags & 2, 2);
                        },
                        _ => panic!("Unexpected event"),
                }
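
Note on the two `channel_flags` hunks above: they track the split of the old `channel_update` `flags` byte into `message_flags` and `channel_flags` (matching BOLT 7 and this PR's title). Within `channel_flags`, bit 0 is the `direction` bit and bit 1 (mask `2`, the one both assertions test) is the `disable` bit. An illustrative sketch of those bit tests; the constants and helper are not LDK API:

```rust
// BOLT 7 `channel_update.channel_flags` layout (illustrative constants).
const DIRECTION_BIT: u8 = 1 << 0; // which direction of the channel the update applies to
const DISABLE_BIT: u8 = 1 << 1;   // set when the sender considers the channel disabled

/// True when an update marks the channel disabled, the condition the `& 2`
/// assertions above check for (`== 0` means enabled, `== 2` means disabled).
fn is_disabled(channel_flags: u8) -> bool {
	channel_flags & DISABLE_BIT != 0
}
```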
@@ -1463,3 +1464,59 @@ fn batch_funding_failure() {
        check_closed_events(&nodes[0], &close);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
 }
+
+#[test]
+fn test_force_closure_on_low_stale_fee() {
+	// Check that we force-close channels when their feerate is low and has gone stale, i.e. no
+	// fee update has been received for a long time.
+       let chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+
+       // Start by connecting lots of blocks to give LDK some feerate history
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS * 2 {
+               connect_blocks(&nodes[1], 1);
+       }
+
+       // Now connect a handful of blocks with a "high" feerate
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock *= 2;
+       }
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+               connect_blocks(&nodes[1], 1);
+       }
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	// Now, note that while one more block at the high feerate would have caused us to
+	// force-close, it won't, because we've dropped the feerate back down.
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock /= 2;
+       }
+       connect_blocks(&nodes[1], super::channelmanager::FEERATE_TRACKING_BLOCKS as u32 * 2);
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	// Now connect another FEERATE_TRACKING_BLOCKS - 1 blocks at a high feerate; note that none
+	// of these will cause a force-closure because LDK only looks at the minimum feerate over the
+	// last FEERATE_TRACKING_BLOCKS blocks.
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock *= 2;
+       }
+
+       for _ in 0..super::channelmanager::FEERATE_TRACKING_BLOCKS - 1 {
+               connect_blocks(&nodes[1], 1);
+       }
+       assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+       // Finally, connect one more block and check the force-close happened.
+       connect_blocks(&nodes[1], 1);
+       check_added_monitors!(nodes[1], 1);
+       check_closed_broadcast(&nodes[1], 1, true);
+       let reason = ClosureReason::PeerFeerateTooLow { peer_feerate_sat_per_kw: 253, required_feerate_sat_per_kw: 253 * 2 };
+       check_closed_events(&nodes[1], &[ExpectedCloseEvent::from_id_reason(chan_id, false, reason)]);
+}
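
Note on the new test: the harness's fee estimator defaults to 253 sat/kW (just above the 1 sat/vB floor), which is why the expected reason reads `peer_feerate_sat_per_kw: 253, required_feerate_sat_per_kw: 253 * 2`. The behavior under test is that a channel is only force-closed once the feerate estimate has exceeded the channel's feerate for an entire `FEERATE_TRACKING_BLOCKS` window; the decision keys off the minimum estimate over the last `FEERATE_TRACKING_BLOCKS` blocks, so a transient spike never closes a channel. A minimal sketch of that windowed-minimum rule; the constant's value and all names here are illustrative, not LDK internals:

```rust
use std::collections::VecDeque;

/// Stand-in for LDK's `FEERATE_TRACKING_BLOCKS`; the real value lives in
/// `channelmanager` and is not shown in this diff.
const FEERATE_TRACKING_BLOCKS: usize = 144;

/// Illustrative tracker: remembers the estimator's value at each of the last
/// `FEERATE_TRACKING_BLOCKS` connected blocks.
struct FeerateTracker {
	recent_estimates: VecDeque<u32>,
}

impl FeerateTracker {
	fn new() -> Self {
		Self { recent_estimates: VecDeque::with_capacity(FEERATE_TRACKING_BLOCKS) }
	}

	/// Record the current estimate once per connected block.
	fn block_connected(&mut self, estimate_sat_per_kw: u32) {
		if self.recent_estimates.len() == FEERATE_TRACKING_BLOCKS {
			self.recent_estimates.pop_front();
		}
		self.recent_estimates.push_back(estimate_sat_per_kw);
	}

	/// Force-close only when even the *minimum* estimate across the whole
	/// window exceeds the channel's feerate: one cheap block in the window
	/// (as in the middle of the test above) resets the decision.
	fn should_force_close(&self, channel_feerate_sat_per_kw: u32) -> bool {
		self.recent_estimates.len() == FEERATE_TRACKING_BLOCKS
			&& self.recent_estimates.iter().min()
				.map_or(false, |&min| min > channel_feerate_sat_per_kw)
	}
}
```

With the test's numbers, `should_force_close(253)` first returns true on the block that makes every tracked estimate read 506, which is exactly where the test expects the monitor update, the closing broadcast, and the `PeerFeerateTooLow` event.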