X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=0775f58bbcadb309508c3291846c36941d742eea;hb=25a707314f70fc2f59488d4de34facb1428fc06d;hp=dfb101b1f907c85b82fddf68b86632e3b08d8e02;hpb=b09ccd10be82a0093ae7b1b196c739fb8f32c42b;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index dfb101b1..0775f58b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -22,7 +22,7 @@ use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFail use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; -use crate::ln::channel::{Channel, ChannelError}; +use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, Channel, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; @@ -1125,10 +1125,8 @@ fn holding_cell_htlc_counting() { { unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1, RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1); } // This should also be true if we try to forward a payment. @@ -1351,16 +1349,12 @@ fn test_basic_channel_reserve() { RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap(); match err { PaymentSendFailure::AllFailedResendSafe(ref fails) => { - match &fails[0] { - &APIError::ChannelUnavailable{ref err} => - assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)), - _ => panic!("Unexpected error variant"), - } + if let &APIError::ChannelUnavailable { .. } = &fails[0] {} + else { panic!("Unexpected error variant"); } }, _ => panic!("Unexpected error variant"), } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 1); send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); } @@ -1527,19 +1521,18 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt); + // Fetch a route in advance as we will be unable to once we're unable to send. 
+ let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); // Sending exactly enough to hit the reserve amount should be accepted for _ in 0..MIN_AFFORDABLE_HTLC_COUNT { let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); } // However one more HTLC should be significantly over the reserve amount and fail. - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 1_000_000); unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value")); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_string(), 1); } #[test] @@ -1565,7 +1558,9 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let (_, _, _) = route_payment(&nodes[1], &[&nodes[0]], 1_000_000); } - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 700_000); + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 1000); + route.paths[0].hops[0].fee_msat = 700_000; // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() let secp_ctx = Secp256k1::new(); let session_priv = SecretKey::from_slice(&[42; 32]).unwrap(); @@ -1627,11 +1622,12 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { } // One more than the dust amt should fail, however. - let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt + 1); + let (mut route, our_payment_hash, _, our_payment_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt); + route.paths[0].hops[0].fee_msat += 1; unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value")); + ), true, APIError::ChannelUnavailable { .. }, {}); } #[test] @@ -1839,10 +1835,8 @@ fn test_channel_reserve_holding_cell_htlcs() { unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1); } // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete @@ -1913,8 +1907,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -1944,10 +1937,8 @@ fn test_channel_reserve_holding_cell_htlcs() { route.paths[0].hops.last_mut().unwrap().fee_msat += 1; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 2); } let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); @@ -4036,10 +4027,14 @@ fn test_drop_messages_peer_disconnect_dual_htlc() { nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -5730,9 +5725,6 @@ fn test_fail_holding_cell_htlc_upon_free() { chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1); - let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our 
balance under counterparty-announced channel reserve value ({}) in channel {}", - hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2)); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1); // Check that the payment failed to be sent out. let events = nodes[0].node.get_and_clear_pending_events(); @@ -5821,9 +5813,6 @@ fn test_free_and_fail_holding_cell_htlcs() { chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2); assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1); - let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}", - hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2)); - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1); // Check that the second payment failed to be sent out. let events = nodes[0].node.get_and_clear_pending_events(); @@ -6032,10 +6021,8 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send less than their minimum HTLC value", 1); } #[test] @@ -6141,11 +6128,9 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() } unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err))); + ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1); } #[test] @@ -6167,11 +6152,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { route.paths[0].hops[0].fee_msat = max_in_flight + 1; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err))); - + ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1); send_payment(&nodes[0], &[&nodes[1]], max_in_flight); } @@ -6356,10 +6338,14 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { //Disconnect and Reconnect nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]); @@ -7122,10 +7108,14 @@ fn test_announce_disable_channels() { } } // Reconnect peers - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap(); + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 3); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap(); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 3); @@ -9452,7 +9442,7 @@ fn test_keysend_payments_to_public_node() { let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), + payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false), final_value_msat: 10000, }; let scorer = test_utils::TestScorer::new(); @@ -9483,7 +9473,7 @@ fn test_keysend_payments_to_private_node() { let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]); let route_params = RouteParameters { - payment_params: PaymentParameters::for_keysend(payee_pubkey, 40), + payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false), final_value_msat: 10000, }; let network_graph = nodes[0].network_graph.clone(); @@ -9630,6 +9620,10 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let (announcement, as_update, bs_update) = 
create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready); update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update); + // Fetch a route in advance as we will be unable to once we're unable to send. + let (mut route, payment_hash, _, payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000); + let dust_buffer_feerate = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); @@ -9642,7 +9636,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e let dust_inbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_success_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000; let dust_inbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_inbound_htlc_on_holder_tx_msat; - let dust_htlc_on_counterparty_tx: u64 = 25; + let dust_htlc_on_counterparty_tx: u64 = 4; let dust_htlc_on_counterparty_tx_msat: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_htlc_on_counterparty_tx; if on_holder_tx { @@ -9667,7 +9661,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if dust_outbound_balance { // Outbound dust threshold: 2132 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) // Outbound dust balance: 5000 sats - for _ in 0..dust_htlc_on_counterparty_tx { + for _ in 0..dust_htlc_on_counterparty_tx - 1 { let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], dust_htlc_on_counterparty_tx_msat); nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); @@ -9675,32 +9669,27 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e } else { // Inbound dust threshold: 2031 sats (`dust_buffer_feerate` * HTLC_TIMEOUT_TX_WEIGHT / 1000 + counteparty's `dust_limit_satoshis`) // Inbound dust balance: 5000 sats - for _ in 0..dust_htlc_on_counterparty_tx { + for _ in 0..dust_htlc_on_counterparty_tx - 1 { route_payment(&nodes[1], &[&nodes[0]], dust_htlc_on_counterparty_tx_msat); } } } - let dust_overflow = dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx + 1); if exposure_breach_event == ExposureEvent::AtHTLCForward { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); - let mut config = UserConfig::default(); + route.paths[0].hops.last_mut().unwrap().fee_msat = + if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }; // With default dust exposure: 5000 sats if on_holder_tx { - let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1); - let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat; unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { 
dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat))); + ), true, APIError::ChannelUnavailable { .. }, {}); } else { unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), true, APIError::ChannelUnavailable { ref err }, - assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat))); + ), true, APIError::ChannelUnavailable { .. }, {}); } } else if exposure_breach_event == ExposureEvent::AtHTLCReception { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat }); + let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }); nodes[1].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); check_added_monitors!(nodes[1], 1); @@ -9716,10 +9705,13 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat), 1); } else { // Outbound dust balance: 5200 sats - nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", dust_overflow, config.channel_config.max_dust_htlc_exposure_msat), 1); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), + format!("Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx", + dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 1, + config.channel_config.max_dust_htlc_exposure_msat), 1); } } else if exposure_breach_event == ExposureEvent::AtUpdateFeeOutbound { - let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 2_500_000); + route.paths[0].hops.last_mut().unwrap().fee_msat = 2_500_000; nodes[0].node.send_payment_with_route(&route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); { @@ -9929,3 +9921,132 @@ fn test_payment_with_custom_min_cltv_expiry_delta() { do_payment_with_custom_min_final_cltv_expiry(true, false); do_payment_with_custom_min_final_cltv_expiry(true, true); } + +#[test] +fn test_disconnects_peer_awaiting_response_ticks() { + // Tests that nodes which are awaiting on a response critical for channel responsiveness + // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. + let mut chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Asserts a disconnect event is queued to the user. 
+ let check_disconnect_event = |node: &Node, should_disconnect: bool| { + let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event| + if let MessageSendEvent::HandleError { action, .. } = event { + if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action { + Some(()) + } else { + None + } + } else { + None + } + ); + assert_eq!(disconnect_event.is_some(), should_disconnect); + }; + + // Fires timer ticks ensuring we only attempt to disconnect peers after reaching + // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. + let check_disconnect = |node: &Node| { + // No disconnect without any timer ticks. + check_disconnect_event(node, false); + + // No disconnect with 1 timer tick less than required. + for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 { + node.node.timer_tick_occurred(); + check_disconnect_event(node, false); + } + + // Disconnect after reaching the required ticks. + node.node.timer_tick_occurred(); + check_disconnect_event(node, true); + + // Disconnect again on the next tick if the peer hasn't been disconnected yet. + node.node.timer_tick_occurred(); + check_disconnect_event(node, true); + }; + + create_chan_between_nodes(&nodes[0], &nodes[1]); + + // We'll start by performing a fee update with Alice (nodes[0]) on the channel. + *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2; + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(&nodes[0], 1); + let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed); + check_added_monitors!(&nodes[1], 1); + + // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`. + let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack); + check_added_monitors!(&nodes[0], 1); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed); + check_added_monitors(&nodes[0], 1); + + // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We + // pretend Bob hasn't received the message and check whether he'll disconnect Alice after + // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. + let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + check_disconnect(&nodes[1]); + + // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message. + // + // Note that since the commitment dance didn't complete above, Alice is expected to resend her + // final `RevokeAndACK` to Bob to complete it. 
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + let bob_init = msgs::Init { + features: nodes[1].node.init_features(), networks: None, remote_network_address: None + }; + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap(); + let alice_init = msgs::Init { + features: nodes[0].node.init_features(), networks: None, remote_network_address: None + }; + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap(); + + // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't + // received Bob's yet, so she should disconnect him after reaching + // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. + let alice_channel_reestablish = get_event_msg!( + nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id() + ); + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish); + check_disconnect(&nodes[0]); + + // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live". + let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event| + if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event { + assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + Some(msg.clone()) + } else { + None + } + ).unwrap(); + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish); + + // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages. + for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS { + nodes[0].node.timer_tick_occurred(); + check_disconnect_event(&nodes[0], false); + } + + // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after + // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`. + check_disconnect(&nodes[1]); + + // Finally, have Bob process the last message. + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack); + check_added_monitors(&nodes[1], 1); + + // At this point, neither node should attempt to disconnect each other, since they aren't + // waiting on any messages. + for node in &nodes { + for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS { + node.node.timer_tick_occurred(); + check_disconnect_event(node, false); + } + } +}
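
The new `test_disconnects_peer_awaiting_response_ticks` test above exercises the tick-based disconnect gated on `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`: a node that is awaiting a response critical for channel progress (the counterparty's `revoke_and_ack` mid commitment dance, or its `channel_reestablish` after reconnecting) counts timer ticks and, once the limit is reached, queues a `MessageSendEvent::HandleError` with `ErrorAction::DisconnectPeerWithWarning` on every subsequent tick until the awaited message arrives. The following is only a rough, self-contained sketch of that counting behaviour as the test observes it; the names (`ResponseTimeout`, `AWAITING_RESPONSE_TICKS`) are hypothetical and are not rust-lightning's actual `Channel` internals.

// Illustrative sketch only: a stand-alone tick counter modeling the behaviour checked by
// the test's `check_disconnect` helper. Not rust-lightning code.

/// Hypothetical stand-in for `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
const AWAITING_RESPONSE_TICKS: usize = 2;

/// Tracks whether we are waiting on a response that is critical for channel progress.
struct ResponseTimeout {
    ticks_waiting: Option<usize>,
}

impl ResponseTimeout {
    fn new() -> Self { Self { ticks_waiting: None } }

    /// Call when we send a message that requires a response (e.g. `commitment_signed`).
    fn awaiting_response(&mut self) {
        if self.ticks_waiting.is_none() {
            self.ticks_waiting = Some(0);
        }
    }

    /// Call when the awaited message arrives (e.g. the counterparty's `revoke_and_ack`).
    fn response_received(&mut self) {
        self.ticks_waiting = None;
    }

    /// Call once per timer tick; returns true if a disconnect should be signalled.
    fn timer_tick_occurred(&mut self) -> bool {
        match self.ticks_waiting.as_mut() {
            Some(ticks) => {
                *ticks += 1;
                *ticks >= AWAITING_RESPONSE_TICKS
            },
            None => false,
        }
    }
}

fn main() {
    let mut timeout = ResponseTimeout::new();
    timeout.awaiting_response();
    // One tick short of the limit: no disconnect yet.
    for _ in 0..AWAITING_RESPONSE_TICKS - 1 {
        assert!(!timeout.timer_tick_occurred());
    }
    // Reaching the limit signals a disconnect, and keeps signalling on later ticks
    // until the response arrives (or the peer is actually disconnected).
    assert!(timeout.timer_tick_occurred());
    assert!(timeout.timer_tick_occurred());
    timeout.response_received();
    assert!(!timeout.timer_tick_occurred());
}

The `main` above mirrors the test's `check_disconnect` closure: no disconnect event for the first `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1` ticks, then repeated disconnect events until the awaited message is processed.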