Document nonzero anchors in features module.
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 6af81108f27a88e76a7520bf9371c34f8bcfdf27..8f093f2a92237984ca81751ed8be2c40a3972952 100644
@@ -20,9 +20,9 @@ use crate::chain::transaction::OutPoint;
 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
-use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, Channel, ChannelError};
+use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
@@ -179,9 +179,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
                let mut sender_node_per_peer_lock;
                let mut sender_node_peer_state_lock;
-               let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-               chan.context.holder_selected_channel_reserve_satoshis = 0;
-               chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               if send_from_initiator {
+                       let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+                       chan.context.holder_selected_channel_reserve_satoshis = 0;
+                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               } else {
+                       let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+                       chan.context.holder_selected_channel_reserve_satoshis = 0;
+                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               }
        }
 
        let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
@@ -3614,8 +3620,8 @@ fn test_peer_disconnected_before_funding_broadcasted() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false);
+       check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false);
 }
 
 #[test]
@@ -6950,8 +6956,8 @@ fn test_user_configurable_csv_delay() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new_outbound()
-       if let Err(error) = OutboundV1Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
+       if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
                &low_our_to_self_config, 0, 42)
        {
@@ -6961,11 +6967,11 @@ fn test_user_configurable_csv_delay() {
                }
        } else { assert!(false) }
 
-       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
+       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &low_our_to_self_config, 0, &nodes[0].logger, 42)
        {
@@ -6993,11 +6999,11 @@ fn test_user_configurable_csv_delay() {
        } else { panic!(); }
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
 
-       // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
+       // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &high_their_to_self_config, 0, &nodes[0].logger, 42)
        {
@@ -7868,7 +7874,7 @@ fn test_reject_funding_before_inbound_channel_accepted() {
        let accept_chan_msg = {
                let mut node_1_per_peer_lock;
                let mut node_1_peer_state_lock;
-               let channel =  get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
+               let channel =  get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
                channel.get_accept_channel_message()
        };
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
@@ -8196,67 +8202,6 @@ fn test_preimage_storage() {
        }
 }
 
-#[test]
-#[allow(deprecated)]
-fn test_secret_timeout() {
-       // Simple test of payment secret storage time outs. After
-       // `create_inbound_payment(_for_hash)_legacy` is removed, this test will be removed as well.
-       let chanmon_cfgs = create_chanmon_cfgs(2);
-       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
-       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
-       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
-       create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
-
-       let (payment_hash, payment_secret_1) = nodes[1].node.create_inbound_payment_legacy(Some(100_000), 2).unwrap();
-
-       // We should fail to register the same payment hash twice, at least until we've connected a
-       // block with time 7200 + CHAN_CONFIRM_DEPTH + 1.
-       if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
-               assert_eq!(err, "Duplicate payment hash");
-       } else { panic!(); }
-       let mut block = {
-               let node_1_blocks = nodes[1].blocks.lock().unwrap();
-               create_dummy_block(node_1_blocks.last().unwrap().0.block_hash(), node_1_blocks.len() as u32 + 7200, Vec::new())
-       };
-       connect_block(&nodes[1], &block);
-       if let Err(APIError::APIMisuseError { err }) = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2) {
-               assert_eq!(err, "Duplicate payment hash");
-       } else { panic!(); }
-
-       // If we then connect the second block, we should be able to register the same payment hash
-       // again (this time getting a new payment secret).
-       block.header.prev_blockhash = block.header.block_hash();
-       block.header.time += 1;
-       connect_block(&nodes[1], &block);
-       let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash_legacy(payment_hash, Some(100_000), 2).unwrap();
-       assert_ne!(payment_secret_1, our_payment_secret);
-
-       {
-               let (route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
-               nodes[0].node.send_payment_with_route(&route, payment_hash,
-                       RecipientOnionFields::secret_only(our_payment_secret), PaymentId(payment_hash.0)).unwrap();
-               check_added_monitors!(nodes[0], 1);
-               let mut events = nodes[0].node.get_and_clear_pending_msg_events();
-               let mut payment_event = SendEvent::from_event(events.pop().unwrap());
-               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
-               commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
-       }
-       // Note that after leaving the above scope we have no knowledge of any arguments or return
-       // values from previous calls.
-       expect_pending_htlcs_forwardable!(nodes[1]);
-       let events = nodes[1].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
-       match events[0] {
-               Event::PaymentClaimable { purpose: PaymentPurpose::InvoicePayment { payment_preimage, payment_secret }, .. } => {
-                       assert!(payment_preimage.is_none());
-                       assert_eq!(payment_secret, our_payment_secret);
-                       // We don't actually have the payment preimage with which to claim this payment!
-               },
-               _ => panic!("Unexpected event"),
-       }
-}
-
 #[test]
 fn test_bad_secret_hash() {
        // Simple test of unregistered payment hash/invalid payment secret handling
@@ -8928,16 +8873,16 @@ fn test_duplicate_chan_id() {
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
        create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
 
-       let funding_created = {
+       let (_, funding_created) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
-               let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
+               let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
-               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
+               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
@@ -9603,7 +9548,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        if on_holder_tx {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
+               let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
                chan.context.holder_dust_limit_satoshis = 546;
        }