Rename set_equality within update_claims_view_from_matched_txn
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index d240a155fad770099f3240b365a568d74c12bafc..79c85e75447d5bb959ef391c5941d0d62db933ff 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -30,7 +30,7 @@ use crate::ln::features::{ChannelFeatures, NodeFeatures};
 use crate::ln::msgs;
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
 use crate::util::enforcing_trait_impls::EnforcingSigner;
-use crate::util::{byte_utils, test_utils};
+use crate::util::test_utils;
 use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::util::errors::APIError;
 use crate::util::ser::{Writeable, ReadableArgs};
@@ -705,7 +705,7 @@ fn test_update_fee_that_funder_cannot_afford() {
 
        // Assemble the set of keys we can use for signatures for our commitment_signed message.
        let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
-               &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap();
+               &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
 
        let res = {
                let local_chan_lock = nodes[0].node.channel_state.lock().unwrap();
@@ -834,7 +834,7 @@ fn test_update_fee_with_fundee_update_add_htlc() {
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
-               Event::PaymentReceived { .. } => { },
+               Event::PaymentClaimable { .. } => { },
                _ => panic!("Unexpected event"),
        };
 
@@ -1179,7 +1179,7 @@ fn holding_cell_htlc_counting() {
        assert_eq!(events.len(), payments.len());
        for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
                match event {
-                       &Event::PaymentReceived { ref payment_hash, .. } => {
+                       &Event::PaymentClaimable { ref payment_hash, .. } => {
                                assert_eq!(*payment_hash, *hash);
                        },
                        _ => panic!("Unexpected event"),
@@ -1412,7 +1412,7 @@ fn test_fee_spike_violation_fails_htlc() {
 
        // Assemble the set of keys we can use for signatures for our commitment_signed message.
        let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
-               &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap();
+               &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint);
 
        // Build the remote commitment transaction so we can sign it, and then later use the
        // signature for the commitment_signed message.
@@ -1936,7 +1936,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
        commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
 
        expect_pending_htlcs_forwardable!(nodes[2]);
-       expect_payment_received!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
+       expect_payment_claimable!(nodes[2], our_payment_hash_1, our_payment_secret_1, recv_value_1);
 
        // flush the htlcs in the holding cell
        assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
@@ -1956,9 +1956,11 @@ fn test_channel_reserve_holding_cell_htlcs() {
        let events = nodes[2].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
        match events[0] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
                        assert_eq!(our_payment_hash_21, *payment_hash);
                        assert_eq!(recv_value_21, amount_msat);
+                       assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
+                       assert_eq!(via_channel_id, Some(chan_2.2));
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -1970,9 +1972,11 @@ fn test_channel_reserve_holding_cell_htlcs() {
                _ => panic!("Unexpected event"),
        }
        match events[1] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
                        assert_eq!(our_payment_hash_22, *payment_hash);
                        assert_eq!(recv_value_22, amount_msat);
+                       assert_eq!(nodes[2].node.get_our_node_id(), receiver_node_id.unwrap());
+                       assert_eq!(via_channel_id, Some(chan_2.2));
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -2102,7 +2106,7 @@ fn channel_reserve_in_flight_removes() {
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_payment_received!(nodes[1], payment_hash_3, payment_secret_3, 100000);
+       expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 100000);
 
        // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
        // resolve the second HTLC from A's point of view.
@@ -2149,7 +2153,7 @@ fn channel_reserve_in_flight_removes() {
        check_added_monitors!(nodes[0], 1);
 
        expect_pending_htlcs_forwardable!(nodes[0]);
-       expect_payment_received!(nodes[0], payment_hash_4, payment_secret_4, 10000);
+       expect_payment_claimable!(nodes[0], payment_hash_4, payment_secret_4, 10000);
 
        claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
@@ -3623,15 +3627,16 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
        let mut as_channel_ready = None;
-       if messages_delivered == 0 {
-               let (channel_ready, _, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+       let channel_id = if messages_delivered == 0 {
+               let (channel_ready, chan_id, _) = create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, channelmanager::provided_init_features(), channelmanager::provided_init_features());
                as_channel_ready = Some(channel_ready);
                // nodes[1] doesn't receive the channel_ready message (it'll be re-sent on reconnect)
                // Note that we store it so that if we're running with `simulate_broken_lnd` we can deliver
                // it before the channel_reestablish message.
+               chan_id
        } else {
-               create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
-       }
+               create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).2
+       };
 
        let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1_000_000);
 
@@ -3734,9 +3739,11 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
        let events_2 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_2.len(), 1);
        match events_2[0] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, amount_msat } => {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, via_channel_id, via_user_channel_id: _ } => {
                        assert_eq!(payment_hash_1, *payment_hash);
                        assert_eq!(amount_msat, 1_000_000);
+                       assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id());
+                       assert_eq!(via_channel_id, Some(channel_id));
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
                                        assert!(payment_preimage.is_none());
@@ -4018,7 +4025,7 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        let events_5 = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events_5.len(), 1);
        match events_5[0] {
-               Event::PaymentReceived { ref payment_hash, ref purpose, .. } => {
+               Event::PaymentClaimable { ref payment_hash, ref purpose, .. } => {
                        assert_eq!(payment_hash_2, *payment_hash);
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
@@ -4061,7 +4068,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
                let mut events = nodes[0].node.get_and_clear_pending_msg_events();
                assert_eq!(events.len(), 1);
                // Now do the relevant commitment_signed/RAA dances along the path, noting that the final
-               // hop should *not* yet generate any PaymentReceived event(s).
+               // hop should *not* yet generate any PaymentClaimable event(s).
                pass_along_path(&nodes[0], &[&nodes[1]], 100000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
                our_payment_hash
        } else {
@@ -4093,8 +4100,8 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) {
        nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_timeout_updates.update_fail_htlcs[0]);
        commitment_signed_dance!(nodes[0], nodes[1], htlc_timeout_updates.commitment_signed, false);
        // 100_000 msat as u64, followed by the height at which we failed back above
-       let mut expected_failure_data = byte_utils::be64_to_array(100_000).to_vec();
-       expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(block_count - 1));
+       let mut expected_failure_data = (100_000 as u64).to_be_bytes().to_vec();
+       expected_failure_data.extend_from_slice(&(block_count - 1).to_be_bytes());
        expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000 | 15, &expected_failure_data[..]);
 }
 
@@ -4796,7 +4803,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        assert_eq!(htlc_success_txn[2], commitment_txn[0]);
        assert_eq!(htlc_success_txn[3], htlc_success_txn[0]);
        assert_eq!(htlc_success_txn[4], htlc_success_txn[1]);
-       assert_ne!(htlc_success_txn[0].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
+       assert_ne!(htlc_success_txn[1].input[0].previous_output, htlc_timeout_tx.input[0].previous_output);
 
        mine_transaction(&nodes[1], &htlc_timeout_tx);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
@@ -4819,7 +4826,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() {
        // Solve 2nd HTLC by broadcasting on B's chain HTLC-Success Tx from C
        // Note that the fee paid is effectively double as the HTLC value (including the nodes[1] fee
        // and nodes[2] fee) is rounded down and then claimed in full.
-       mine_transaction(&nodes[1], &htlc_success_txn[0]);
+       mine_transaction(&nodes[1], &htlc_success_txn[1]);
        expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(196*2), true, true);
        let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
        assert!(updates.update_add_htlcs.is_empty());
@@ -5793,7 +5800,7 @@ fn test_free_and_fail_holding_cell_htlcs() {
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
-               Event::PaymentReceived { .. } => {},
+               Event::PaymentClaimable { .. } => {},
                _ => panic!("Unexpected event"),
        }
        nodes[1].node.claim_funds(payment_preimage_1);
@@ -6016,7 +6023,7 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
                .with_features(channelmanager::provided_invoice_features());
        let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000, 0);
        route.paths[0].last_mut().unwrap().cltv_expiry_delta = 500000001;
-       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::RouteError { ref err },
+       unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::InvalidRoute { ref err },
                assert_eq!(err, &"Channel CLTV overflowed?"));
 }
 
@@ -6052,7 +6059,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
 
                expect_pending_htlcs_forwardable!(nodes[1]);
-               expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 100000);
+               expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000);
        }
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
        unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err },
@@ -6967,8 +6974,8 @@ fn test_check_htlc_underpaying() {
        commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
 
        // 10_000 msat as u64, followed by a height of CHAN_CONFIRM_DEPTH as u32
-       let mut expected_failure_data = byte_utils::be64_to_array(10_000).to_vec();
-       expected_failure_data.extend_from_slice(&byte_utils::be32_to_array(CHAN_CONFIRM_DEPTH));
+       let mut expected_failure_data = (10_000 as u64).to_be_bytes().to_vec();
+       expected_failure_data.extend_from_slice(&CHAN_CONFIRM_DEPTH.to_be_bytes());
        expect_payment_failed!(nodes[0], our_payment_hash, true, 0x4000|15, &expected_failure_data[..]);
 }
 
@@ -7947,7 +7954,7 @@ fn test_preimage_storage() {
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
-               Event::PaymentReceived { ref purpose, .. } => {
+               Event::PaymentClaimable { ref purpose, .. } => {
                        match &purpose {
                                PaymentPurpose::InvoicePayment { payment_preimage, .. } => {
                                        claim_payment(&nodes[0], &[&nodes[1]], payment_preimage.unwrap());
@@ -8017,7 +8024,7 @@ fn test_secret_timeout() {
        let events = nodes[1].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 1);
        match events[0] {
-               Event::PaymentReceived { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => {
+               Event::PaymentClaimable { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => {
                        assert!(payment_preimage.is_none());
                        assert_eq!(payment_secret, our_payment_secret);
                        // We don't actually have the payment preimage with which to claim this payment!
@@ -8124,7 +8131,7 @@ fn test_update_err_monitor_lockdown() {
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+                               &mut io::Cursor::new(&w.0), nodes[0].keys_manager).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
@@ -8188,7 +8195,7 @@ fn test_concurrent_monitor_claim() {
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+                               &mut io::Cursor::new(&w.0), nodes[0].keys_manager).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
@@ -8217,7 +8224,7 @@ fn test_concurrent_monitor_claim() {
                let mut w = test_utils::TestVecWriter(Vec::new());
                monitor.write(&mut w).unwrap();
                let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
-                               &mut io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
+                               &mut io::Cursor::new(&w.0), nodes[0].keys_manager).unwrap().1;
                assert!(new_monitor == *monitor);
                let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
                assert_eq!(watchtower.watch_channel(outpoint, new_monitor), ChannelMonitorUpdateStatus::Completed);
@@ -8952,7 +8959,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
                commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
        }
        expect_pending_htlcs_forwardable!(nodes[1]);
-       expect_payment_received!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
+       expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 10_000);
 
        {
                // Note that we use a different PaymentId here to allow us to duplicativly pay
@@ -9014,7 +9021,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 #[test]
 fn test_dup_htlc_second_fail_panic() {
        // Previously, if we received two HTLCs back-to-back, where the second overran the expected
-       // value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
+       // value for the payment, we'd fail back both HTLCs after generating a `PaymentClaimable` event.
        // Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
        // HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
        do_test_dup_htlc_second_rejected(true);
@@ -9201,9 +9208,9 @@ fn test_keysend_payments_to_private_node() {
 
 #[test]
 fn test_double_partial_claim() {
-       // Test what happens if a node receives a payment, generates a PaymentReceived event, the HTLCs
+       // Test what happens if a node receives a payment, generates a PaymentClaimable event, the HTLCs
        // time out, the sender resends only some of the MPP parts, then the user processes the
-       // PaymentReceived event, ensuring they don't inadvertently claim only part of the full payment
+       // PaymentClaimable event, ensuring they don't inadvertently claim only part of the full payment
        // amount.
        let chanmon_cfgs = create_chanmon_cfgs(4);
        let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
@@ -9224,7 +9231,7 @@ fn test_double_partial_claim() {
        });
 
        send_along_route_with_secret(&nodes[0], route.clone(), &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 15_000_000, payment_hash, payment_secret);
-       // nodes[3] has now received a PaymentReceived event...which it will take some (exorbitant)
+       // nodes[3] has now received a PaymentClaimable event...which it will take some (exorbitant)
        // amount of time to respond to.
 
        // Connect some blocks to time out the payment
@@ -9248,7 +9255,7 @@ fn test_double_partial_claim() {
        pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
 
        // At this point nodes[3] has received one half of the payment, and the user goes to handle
-       // that PaymentReceived event they got hours ago and never handled...we should refuse to claim.
+       // that PaymentClaimable event they got hours ago and never handled...we should refuse to claim.
        nodes[3].node.claim_funds(payment_preimage);
        check_added_monitors!(nodes[3], 0);
        assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
@@ -9469,3 +9476,81 @@ fn test_non_final_funding_tx() {
        assert!(nodes[0].node.funding_transaction_generated(&temp_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).is_ok());
        get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
 }
+
+#[test]
+fn accept_busted_but_better_fee() {
+       // If a peer sends us a fee update that is too low, but higher than our previous channel
+       // feerate, we should accept it. In the future we may want to consider closing the channel
+       // later, but for now we only accept the update.
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
+
+       // Set nodes[1] to expect 5,000 sat/kW.
+       {
+               let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock = 5000;
+       }
+
+       // If nodes[0] increases their feerate, even if it's not enough, nodes[1] should accept it.
+       {
+               let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock = 1000;
+       }
+       nodes[0].node.timer_tick_occurred();
+       check_added_monitors!(nodes[0], 1);
+
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+                       nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
+                       commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       // If nodes[0] increases their feerate further, even if it's not enough, nodes[1] should accept
+       // it.
+       {
+               let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock = 2000;
+       }
+       nodes[0].node.timer_tick_occurred();
+       check_added_monitors!(nodes[0], 1);
+
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+                       nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
+                       commitment_signed_dance!(nodes[1], nodes[0], commitment_signed, false);
+               },
+               _ => panic!("Unexpected event"),
+       };
+
+       // However, if nodes[0] decreases their feerate, nodes[1] should reject it and close the
+       // channel.
+       {
+               let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap();
+               *feerate_lock = 1000;
+       }
+       nodes[0].node.timer_tick_occurred();
+       check_added_monitors!(nodes[0], 1);
+
+       let events = nodes[0].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
+                       nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_fee.as_ref().unwrap());
+                       check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError {
+                               err: "Peer's feerate much too low. Actual: 1000. Our expected lower limit: 5000 (- 250)".to_owned() });
+                       check_closed_broadcast!(nodes[1], true);
+                       check_added_monitors!(nodes[1], 1);
+               },
+               _ => panic!("Unexpected event"),
+       };
+}