Set UpdateAddHTLC::skimmed_fee_msat on forward
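
The test changes below cover two things at once: every test that hand-builds an update_add_htlc message must now populate the new skimmed_fee_msat field, and the tests pick up the split of Channel into OutboundV1Channel / InboundV1Channel (with the matching get_outbound_v1_channel_ref!/get_inbound_v1_channel_ref! accessors). As a minimal sketch of the pattern the hunks repeat, assuming skimmed_fee_msat is an Option<u64> on msgs::UpdateAddHTLC and that channel_id/htlc_id/amount_msat come from the surrounding test:

    // Sketch only: tests constructing update_add_htlc by hand now set the new
    // field explicitly; None means no fee is being skimmed on forward.
    let msg = msgs::UpdateAddHTLC {
        channel_id: chan.2,                 // channel/htlc identifiers as used
        htlc_id: 0,                         // in the surrounding test (assumed)
        amount_msat: htlc_msat,
        payment_hash,
        cltv_expiry: htlc_cltv,
        onion_routing_packet: onion_packet,
        skimmed_fee_msat: None,             // new optional field; unset for direct sends
    };
    nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
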
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 58977e7896419cd56fa58f53f56f5b59cf93cc12..63b3b2bb0a49d9ba661f00050ddb7c2c031ed098 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -20,9 +20,9 @@ use crate::chain::transaction::OutPoint;
 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
-use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, Channel, ChannelError};
+use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
@@ -179,9 +179,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
                let mut sender_node_per_peer_lock;
                let mut sender_node_peer_state_lock;
-               let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-               chan.context.holder_selected_channel_reserve_satoshis = 0;
-               chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               if send_from_initiator {
+                       let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+                       chan.context.holder_selected_channel_reserve_satoshis = 0;
+                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               } else {
+                       let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+                       chan.context.holder_selected_channel_reserve_satoshis = 0;
+                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               }
        }
 
        let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
@@ -1387,6 +1393,7 @@ fn test_fee_spike_violation_fails_htlc() {
                payment_hash: payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
@@ -1576,6 +1583,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
                payment_hash: payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
@@ -1752,6 +1760,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
                payment_hash: our_payment_hash_1,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
@@ -3404,6 +3413,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                        payment_hash,
                        cltv_expiry,
                        onion_routing_packet,
+                       skimmed_fee_msat: None,
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
@@ -3614,8 +3624,8 @@ fn test_peer_disconnected_before_funding_broadcasted() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false);
+       check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false);
 }
 
 #[test]
@@ -6253,6 +6263,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
                payment_hash: our_payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet.clone(),
+               skimmed_fee_msat: None,
        };
 
        for i in 0..50 {
@@ -6950,8 +6961,8 @@ fn test_user_configurable_csv_delay() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
-       if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
+       if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
                &low_our_to_self_config, 0, 42)
        {
@@ -6961,11 +6972,11 @@ fn test_user_configurable_csv_delay() {
                }
        } else { assert!(false) }
 
-       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
+       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &low_our_to_self_config, 0, &nodes[0].logger, 42)
        {
@@ -6993,11 +7004,11 @@ fn test_user_configurable_csv_delay() {
        } else { panic!(); }
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
 
-       // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
+       // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &high_their_to_self_config, 0, &nodes[0].logger, 42)
        {
@@ -7868,7 +7879,7 @@ fn test_reject_funding_before_inbound_channel_accepted() {
        let accept_chan_msg = {
                let mut node_1_per_peer_lock;
                let mut node_1_peer_state_lock;
-               let channel =  get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
+               let channel =  get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
                channel.get_accept_channel_message()
        };
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
@@ -8928,16 +8939,16 @@ fn test_duplicate_chan_id() {
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
        create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
 
-       let funding_created = {
+       let (_, funding_created) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
-               let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
+               let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
-               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
+               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
@@ -9603,7 +9614,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        if on_holder_tx {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
+               let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
                chan.context.holder_dust_limit_satoshis = 546;
        }
 
@@ -9628,7 +9639,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-               chan.get_dust_buffer_feerate(None) as u64
+               chan.context.get_dust_buffer_feerate(None) as u64
        };
        let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
        let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;