Set UpdateAddHTLC::skimmed_fee_msat on forward
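
The `UpdateAddHTLC` wire message gains an optional skimmed-fee field, which every
manually constructed HTLC in the tests below must now initialize. A minimal sketch of
the assumed message shape (abridged; the `Option<u64>` type is inferred from the
`skimmed_fee_msat: None` initializers in this diff):

    pub struct UpdateAddHTLC {
        pub channel_id: [u8; 32],
        pub htlc_id: u64,
        pub amount_msat: u64,
        pub payment_hash: PaymentHash,
        pub cltv_expiry: u32,
        // Fee skimmed off the HTLC amount by the forwarding node (e.g. an LSP);
        // `None` throughout these tests since nothing is skimmed.
        pub skimmed_fee_msat: Option<u64>,
        pub onion_routing_packet: OnionPacket,
    }
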
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 3985ac03bbd1ce391067264d914d951439f0fc05..63b3b2bb0a49d9ba661f00050ddb7c2c031ed098 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -20,9 +20,9 @@ use crate::chain::transaction::OutPoint;
 use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
 use crate::ln::{PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel};
 use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
-use crate::ln::channel::{Channel, ChannelError};
+use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
 use crate::ln::{chan_utils, onion_utils};
 use crate::ln::chan_utils::{OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment};
 use crate::routing::gossip::{NetworkGraph, NetworkUpdate};
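
Throughout this diff, `get_holder_selected_channel_reserve_satoshis` moves from an
associated function on `Channel` to a free function in `crate::ln::channel`, so call
sites drop the signer turbofish:

    // Before: associated function, requiring an explicit signer type.
    let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &cfg);
    // After: free function imported at the top of this file.
    let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &cfg);
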
@@ -75,7 +75,7 @@ fn test_insane_channel_opens() {
        // Instantiate channel parameters where we push the maximum msats given our
        // funding satoshis
        let channel_value_sat = 31337; // same as funding satoshis
-       let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
+       let channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
        let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
 
        // Have node0 initiate a channel to node1 with aforementioned parameters
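
As a worked example, assuming the default handshake config reserves 1% of the channel
value with a 1,000 sat floor:

    let channel_value_sat = 31_337u64;
    // 1% of 31_337 is only 313 sats, so the assumed 1_000 sat floor applies.
    let channel_reserve_satoshis = (channel_value_sat / 100).max(1_000); // = 1_000
    let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000; // = 30_337_000
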
@@ -157,7 +157,7 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
        let feerate_per_kw = 253;
        let opt_anchors = false;
        push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
        let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
        let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
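
The buffer subtracted above is the commitment fee for four HTLCs, truncated to whole
sats. With the non-anchor weights assumed from `ln::channel` (724 weight for the base
commitment transaction, 172 per HTLC) and the 253 sat/kW feerate:

    // 253 * (724 + 4 * 172) / 1000 * 1000; integer division truncates to whole sats.
    let fee_buffer_msat = 253u64 * (724 + 4 * 172) / 1000 * 1000; // 253 * 1412 = 357_236 -> 357_000
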
@@ -179,9 +179,15 @@ fn do_test_counterparty_no_reserve(send_from_initiator: bool) {
                let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
                let mut sender_node_per_peer_lock;
                let mut sender_node_peer_state_lock;
-               let mut chan = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
-               chan.holder_selected_channel_reserve_satoshis = 0;
-               chan.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               if send_from_initiator {
+                       let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+                       chan.context.holder_selected_channel_reserve_satoshis = 0;
+                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               } else {
+                       let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+                       chan.context.holder_selected_channel_reserve_satoshis = 0;
+                       chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+               }
        }
 
        let funding_tx = sign_funding_transaction(&nodes[0], &nodes[1], 100_000, temp_channel_id);
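
The branching above reflects the split of the monolithic `Channel` into
`OutboundV1Channel` and `InboundV1Channel` for unfunded channels, each wrapping shared
state in a `ChannelContext` (hence the `.context.` accessors throughout this diff).
A rough sketch of the assumed layout, with the two helper macros presumed to look
channels up in per-peer `outbound_v1_channel_by_id`/`inbound_v1_channel_by_id` maps
(the outbound map appears directly in the `test_duplicate_chan_id` hunk below):

    // Sketch only; visibility, generics, and fields abridged.
    pub(super) struct ChannelContext<Signer: ChannelSigner> {
        pub(crate) holder_selected_channel_reserve_satoshis: u64,
        pub(crate) holder_max_htlc_value_in_flight_msat: u64,
        // ...all other per-channel state...
    }
    pub(super) struct OutboundV1Channel<Signer: ChannelSigner> { pub context: ChannelContext<Signer> }
    pub(super) struct InboundV1Channel<Signer: ChannelSigner> { pub context: ChannelContext<Signer> }
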
@@ -643,7 +649,7 @@ fn test_update_fee_that_funder_cannot_afford() {
        let channel_id = chan.2;
        let secp_ctx = Secp256k1::new();
        let default_config = UserConfig::default();
-       let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
+       let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
 
        let opt_anchors = false;
 
@@ -728,7 +734,7 @@ fn test_update_fee_that_funder_cannot_afford() {
                        commit_tx_keys.clone(),
                        non_buffer_feerate + 4,
                        &mut htlcs,
-                       &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
+                       &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
                local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
        };
@@ -1125,10 +1131,8 @@ fn holding_cell_htlc_counting() {
        {
                unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, payment_hash_1,
                                RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)
-                       ), true, APIError::ChannelUnavailable { ref err },
-                       assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
+                       ), true, APIError::ChannelUnavailable { .. }, {});
                assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
        }
 
        // This should also be true if we try to forward a payment.
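
This pattern recurs throughout the diff: the exact error strings moved out of
`channelmanager`, so the tests now match only the `APIError::ChannelUnavailable`
variant and drop the regex and log assertions. In `unwrap_send_err!`, the final
argument is a block of extra assertions run against the matched error, now left empty:

    // Assumed macro shape: (send result, expected-failure-kind flag, error pattern, extra checks).
    unwrap_send_err!(send_result, true, APIError::ChannelUnavailable { .. }, {});
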
@@ -1351,16 +1355,12 @@ fn test_basic_channel_reserve() {
                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap();
        match err {
                PaymentSendFailure::AllFailedResendSafe(ref fails) => {
-                       match &fails[0] {
-                               &APIError::ChannelUnavailable{ref err} =>
-                                       assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)),
-                               _ => panic!("Unexpected error variant"),
-                       }
+                       if let &APIError::ChannelUnavailable { .. } = &fails[0] {}
+                       else { panic!("Unexpected error variant"); }
                },
                _ => panic!("Unexpected error variant"),
        }
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 1);
 
        send_payment(&nodes[0], &vec![&nodes[1]], max_can_send);
 }
@@ -1393,6 +1393,7 @@ fn test_fee_spike_violation_fails_htlc() {
                payment_hash: payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
@@ -1459,11 +1460,11 @@ fn test_fee_spike_violation_fails_htlc() {
                        commitment_number,
                        95000,
                        local_chan_balance,
-                       local_chan.opt_anchors(), local_funding, remote_funding,
+                       local_chan.context.opt_anchors(), local_funding, remote_funding,
                        commit_tx_keys.clone(),
                        feerate_per_kw,
                        &mut vec![(accepted_htlc_info, ())],
-                       &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
+                       &local_chan.context.channel_transaction_parameters.as_counterparty_broadcastable()
                );
                local_chan_signer.sign_counterparty_commitment(&commitment_tx, Vec::new(), &secp_ctx).unwrap()
        };
@@ -1523,7 +1524,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
        let mut push_amt = 100_000_000;
        push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
 
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
 
        let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
 
@@ -1537,10 +1538,8 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
        // However one more HTLC should be significantly over the reserve amount and fail.
        unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
+               ), true, APIError::ChannelUnavailable { .. }, {});
        assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_string(), 1);
 }
 
 #[test]
@@ -1558,7 +1557,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
        // transaction fee with 0 HTLCs (183 sats)).
        let mut push_amt = 100_000_000;
        push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt);
 
        // Send four HTLCs to cover the initial push_msat buffer we're required to include
@@ -1584,6 +1583,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() {
                payment_hash: payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg);
@@ -1614,7 +1614,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        // transaction fee with 0 HTLCs (183 sats)).
        let mut push_amt = 100_000_000;
        push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt);
 
        let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
@@ -1635,8 +1635,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
        route.paths[0].hops[0].fee_msat += 1;
        unwrap_send_err!(nodes[1].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
+               ), true, APIError::ChannelUnavailable { .. }, {});
 }
 
 #[test]
@@ -1660,7 +1659,7 @@ fn test_chan_init_feerate_unaffordability() {
 
        // During open, we don't have a "counterparty channel reserve" to check against, so that
        // requirement only comes into play on the open_channel handling side.
-       push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
+       push_amt -= get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
        nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
        let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
        open_channel_msg.push_msat += 1;
@@ -1761,6 +1760,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
                payment_hash: our_payment_hash_1,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet,
+               skimmed_fee_msat: None,
        };
 
        nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
@@ -1786,7 +1786,7 @@ fn test_inbound_outbound_capacity_is_not_zero() {
        assert_eq!(channels0.len(), 1);
        assert_eq!(channels1.len(), 1);
 
-       let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
+       let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
        assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
        assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
 
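
With the defaults assumed earlier (1% reserve, 1,000 sat floor), a 100,000 sat channel
yields a 1,000 sat reserve, so both assertions expect the same figure:

    let reserve = get_holder_selected_channel_reserve_satoshis(100_000, &default_config); // = 1_000 assumed
    assert_eq!(95_000_000 - reserve * 1000, 94_000_000);
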
@@ -1844,10 +1844,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
 
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-                       ), true, APIError::ChannelUnavailable { ref err },
-                       assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
+                       ), true, APIError::ChannelUnavailable { .. }, {});
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
        }
 
        // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
@@ -1918,8 +1916,7 @@ fn test_channel_reserve_holding_cell_htlcs() {
                let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]);
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-                       ), true, APIError::ChannelUnavailable { ref err },
-                       assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
+                       ), true, APIError::ChannelUnavailable { .. }, {});
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
        }
 
@@ -1949,10 +1946,8 @@ fn test_channel_reserve_holding_cell_htlcs() {
                route.paths[0].hops.last_mut().unwrap().fee_msat += 1;
                unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                                RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-                       ), true, APIError::ChannelUnavailable { ref err },
-                       assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)));
+                       ), true, APIError::ChannelUnavailable { .. }, {});
                assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-               nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put our balance under counterparty-announced channel reserve value", 2);
        }
 
        let (route_22, our_payment_hash_22, our_payment_preimage_22, our_payment_secret_22) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22);
@@ -3136,7 +3131,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use
                // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
                // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
                nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
-                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().holder_dust_limit_satoshis * 1000
+                       .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
        } else { 3000000 };
 
        let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
@@ -3418,6 +3413,7 @@ fn fail_backward_pending_htlc_upon_channel_failure() {
                        payment_hash,
                        cltv_expiry,
                        onion_routing_packet,
+                       skimmed_fee_msat: None,
                };
                nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_htlc);
        }
@@ -3628,8 +3624,8 @@ fn test_peer_disconnected_before_funding_broadcasted() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
-       check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+       check_closed_event(&nodes[0], 1, ClosureReason::DisconnectedPeer, false);
+       check_closed_event(&nodes[1], 1, ClosureReason::DisconnectedPeer, false);
 }
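
These two call sites switch from the `check_closed_event!` macro to the underlying
helper function, which takes one extra explicit flag. A sketch of the assumed
signature:

    // Assumed helper signature; the final bool is whether a DiscardFunding event is
    // expected alongside the ChannelClosed event.
    pub fn check_closed_event(node: &Node, events_count: usize,
        expected_reason: ClosureReason, is_check_discard_funding: bool);
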
 
 #[test]
@@ -4041,10 +4037,14 @@ fn test_drop_messages_peer_disconnect_dual_htlc() {
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
 
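
`msgs::Init` gains a `networks` field (the BOLT 1 init `networks` TLV); passing `None`
keeps the handshake chain-agnostic. The assumed shape:

    pub struct Init {
        pub features: InitFeatures,
        // Chains the peer is interested in; assumed to be Option<Vec<ChainHash>>,
        // and None in every test in this file.
        pub networks: Option<Vec<ChainHash>>,
        pub remote_network_address: Option<NetAddress>,
    }
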
@@ -5000,7 +5000,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno
        assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
 
        let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
        // 0th HTLC:
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
        // 1st HTLC:
@@ -5735,9 +5735,6 @@ fn test_fail_holding_cell_htlc_upon_free() {
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
        nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 1 HTLC updates in channel {}", hex::encode(chan.2)), 1);
-       let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
-               hex::encode(our_payment_hash.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
 
        // Check that the payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -5826,9 +5823,6 @@ fn test_free_and_fail_holding_cell_htlcs() {
        chan_stat = get_channel_value_stat!(nodes[0], nodes[1], chan.2);
        assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
        nodes[0].logger.assert_log("lightning::ln::channel".to_string(), format!("Freeing holding cell with 2 HTLC updates in channel {}", hex::encode(chan.2)), 1);
-       let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put our balance under counterparty-announced channel reserve value ({}) in channel {}",
-               hex::encode(payment_hash_2.0), chan_stat.channel_reserve_msat, hex::encode(chan.2));
-       nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
 
        // Check that the second payment failed to be sent out.
        let events = nodes[0].node.get_and_clear_pending_events();
@@ -6037,10 +6031,8 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
 
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
+               ), true, APIError::ChannelUnavailable { .. }, {});
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send less than their minimum HTLC value", 1);
 }
 
 #[test]
@@ -6117,7 +6109,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
        let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().counterparty_max_accepted_htlcs as u64;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
 
        // Fetch a route in advance as we will be unable to once we're unable to send.
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
@@ -6146,11 +6138,9 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment()
        }
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
+               ), true, APIError::ChannelUnavailable { .. }, {});
 
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot push more than their max accepted HTLCs", 1);
 }
 
 #[test]
@@ -6172,11 +6162,8 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
        route.paths[0].hops[0].fee_msat =  max_in_flight + 1;
        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, our_payment_hash,
                        RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)
-               ), true, APIError::ChannelUnavailable { ref err },
-               assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
-
+               ), true, APIError::ChannelUnavailable { .. }, {});
        assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-       nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send value that would put us over the max HTLC value in flight our peer will accept", 1);
 
        send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
 }
@@ -6195,7 +6182,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
-               htlc_minimum_msat = channel.get_holder_htlc_minimum_msat();
+               htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
        }
 
        let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
@@ -6276,6 +6263,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
                payment_hash: our_payment_hash,
                cltv_expiry: htlc_cltv,
                onion_routing_packet: onion_packet.clone(),
+               skimmed_fee_msat: None,
        };
 
        for i in 0..50 {
@@ -6361,10 +6349,14 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
        //Disconnect and Reconnect
        nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 1);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 1);
        nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]);
@@ -6793,7 +6785,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
 
        // We route 2 dust-HTLCs between A and B
        let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
@@ -6886,7 +6878,7 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
        let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 
        let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
-               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+               .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
 
        let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
        let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
@@ -6969,8 +6961,8 @@ fn test_user_configurable_csv_delay() {
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
-       if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in OutboundV1Channel::new()
+       if let Err(error) = OutboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[1].node.init_features(), 1000000, 1000000, 0,
                &low_our_to_self_config, 0, 42)
        {
@@ -6980,11 +6972,11 @@ fn test_user_configurable_csv_delay() {
                }
        } else { assert!(false) }
 
-       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
+       // We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &low_our_to_self_config, 0, &nodes[0].logger, 42)
        {
@@ -7012,11 +7004,11 @@ fn test_user_configurable_csv_delay() {
        } else { panic!(); }
        check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: reason_msg });
 
-       // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
+       // We test msg.to_self_delay <= config.their_to_self_delay is enforced in InboundV1Channel::new()
        nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
        let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
        open_channel.to_self_delay = 200;
-       if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
+       if let Err(error) = InboundV1Channel::new(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
                &nodes[0].keys_manager, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &nodes[0].node.channel_type_features(), &nodes[1].node.init_features(), &open_channel, 0,
                &high_their_to_self_config, 0, &nodes[0].logger, 42)
        {
@@ -7127,10 +7119,14 @@ fn test_announce_disable_channels() {
                }
        }
        // Reconnect peers
-       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       }, true).unwrap();
        let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
        assert_eq!(reestablish_1.len(), 3);
-       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       }, false).unwrap();
        let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
        assert_eq!(reestablish_2.len(), 3);
 
@@ -7883,7 +7879,7 @@ fn test_reject_funding_before_inbound_channel_accepted() {
        let accept_chan_msg = {
                let mut node_1_per_peer_lock;
                let mut node_1_peer_state_lock;
-               let channel =  get_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
+               let channel =  get_inbound_v1_channel_ref!(&nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, temp_channel_id);
                channel.get_accept_channel_message()
        };
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
@@ -8943,16 +8939,16 @@ fn test_duplicate_chan_id() {
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
        create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event
 
-       let funding_created = {
+       let (_, funding_created) = {
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                // Once we call `get_outbound_funding_created` the channel has a duplicate channel_id as
                // another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
                // try to create another channel. Instead, we drop the channel entirely here (leaving the
                // channelmanager in a possibly nonsense state instead).
-               let mut as_chan = a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
+               let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
-               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
+               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
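
`get_outbound_funding_created` now consumes the unfunded `OutboundV1Channel` and, on
success, is assumed to return the funded channel alongside the `FundingCreated`
message (hence the tuple destructure); on failure it is assumed to hand the channel
back to the caller, which is why the non-`Debug` error is mapped to `()` before
`unwrap()`. A sketch of the assumed shape:

    pub fn get_outbound_funding_created<L: Deref>(
        self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L,
    ) -> Result<(Channel<Signer>, msgs::FundingCreated), (Self, ChannelError)>
        where L::Target: Logger;
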
@@ -9457,7 +9453,7 @@ fn test_keysend_payments_to_public_node() {
        let payer_pubkey = nodes[0].node.get_our_node_id();
        let payee_pubkey = nodes[1].node.get_our_node_id();
        let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
                final_value_msat: 10000,
        };
        let scorer = test_utils::TestScorer::new();
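
`PaymentParameters::for_keysend` takes a new third argument; it is assumed to be an
`allow_mpp_keysend: bool`, with `false` preserving the old single-path keysend
behavior:

    // Assumed signature change:
    pub fn for_keysend(payee_pubkey: PublicKey, final_cltv_expiry_delta: u32, allow_mpp_keysend: bool) -> Self;
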
@@ -9488,7 +9484,7 @@ fn test_keysend_payments_to_private_node() {
 
        let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
        let route_params = RouteParameters {
-               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
+               payment_params: PaymentParameters::for_keysend(payee_pubkey, 40, false),
                final_value_msat: 10000,
        };
        let network_graph = nodes[0].network_graph.clone();
@@ -9618,8 +9614,8 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        if on_holder_tx {
                let mut node_0_per_peer_lock;
                let mut node_0_peer_state_lock;
-               let mut chan = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
-               chan.holder_dust_limit_satoshis = 546;
+               let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
+               chan.context.holder_dust_limit_satoshis = 546;
        }
 
        nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
@@ -9643,7 +9639,7 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
                let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
                let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
                let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
-               chan.get_dust_buffer_feerate(None) as u64
+               chan.context.get_dust_buffer_feerate(None) as u64
        };
        let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(opt_anchors) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
        let dust_outbound_htlc_on_holder_tx: u64 = config.channel_config.max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
@@ -9693,23 +9689,15 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e
        if exposure_breach_event == ExposureEvent::AtHTLCForward {
                route.paths[0].hops.last_mut().unwrap().fee_msat =
                        if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 };
-               let mut config = UserConfig::default();
                // With default dust exposure: 5000 sats
                if on_holder_tx {
-                       let dust_outbound_overflow = dust_outbound_htlc_on_holder_tx_msat * (dust_outbound_htlc_on_holder_tx + 1);
-                       let dust_inbound_overflow = dust_inbound_htlc_on_holder_tx_msat * dust_inbound_htlc_on_holder_tx + dust_outbound_htlc_on_holder_tx_msat;
                        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
                                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
-                               ), true, APIError::ChannelUnavailable { ref err },
-                               assert_eq!(err, &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx", if dust_outbound_balance { dust_outbound_overflow } else { dust_inbound_overflow }, config.channel_config.max_dust_htlc_exposure_msat)));
+                               ), true, APIError::ChannelUnavailable { .. }, {});
                } else {
                        unwrap_send_err!(nodes[0].node.send_payment_with_route(&route, payment_hash,
                                        RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)
-                               ), true, APIError::ChannelUnavailable { ref err },
-                               assert_eq!(err,
-                                       &format!("Cannot send value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
-                                               dust_htlc_on_counterparty_tx_msat * (dust_htlc_on_counterparty_tx - 1) + dust_htlc_on_counterparty_tx_msat + 1,
-                                               config.channel_config.max_dust_htlc_exposure_msat)));
+                               ), true, APIError::ChannelUnavailable { .. }, {});
                }
        } else if exposure_breach_event == ExposureEvent::AtHTLCReception {
                let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], if on_holder_tx { dust_inbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 });
@@ -9944,3 +9932,132 @@ fn test_payment_with_custom_min_cltv_expiry_delta() {
        do_payment_with_custom_min_final_cltv_expiry(true, false);
        do_payment_with_custom_min_final_cltv_expiry(true, true);
 }
+
+#[test]
+fn test_disconnects_peer_awaiting_response_ticks() {
+       // Tests that nodes which are awaiting a response critical for channel responsiveness
+       // disconnect their counterparty after `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       let mut chanmon_cfgs = create_chanmon_cfgs(2);
+       let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+       let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+       let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+       // Asserts a disconnect event is queued to the user.
+       let check_disconnect_event = |node: &Node, should_disconnect: bool| {
+               let disconnect_event = node.node.get_and_clear_pending_msg_events().iter().find_map(|event|
+                       if let MessageSendEvent::HandleError { action, .. } = event {
+                               if let msgs::ErrorAction::DisconnectPeerWithWarning { .. } = action {
+                                       Some(())
+                               } else {
+                                       None
+                               }
+                       } else {
+                               None
+                       }
+               );
+               assert_eq!(disconnect_event.is_some(), should_disconnect);
+       };
+
+       // Fires timer ticks ensuring we only attempt to disconnect peers after reaching
+       // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       let check_disconnect = |node: &Node| {
+               // No disconnect without any timer ticks.
+               check_disconnect_event(node, false);
+
+               // No disconnect with 1 timer tick less than required.
+               for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS - 1 {
+                       node.node.timer_tick_occurred();
+                       check_disconnect_event(node, false);
+               }
+
+               // Disconnect after reaching the required ticks.
+               node.node.timer_tick_occurred();
+               check_disconnect_event(node, true);
+
+               // Disconnect again on the next tick if the peer hasn't been disconnected yet.
+               node.node.timer_tick_occurred();
+               check_disconnect_event(node, true);
+       };
+
+       create_chan_between_nodes(&nodes[0], &nodes[1]);
+
+       // We'll start by performing a fee update with Alice (nodes[0]) on the channel.
+       *nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+       nodes[0].node.timer_tick_occurred();
+       check_added_monitors!(&nodes[0], 1);
+       let alice_fee_update = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id());
+       nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), alice_fee_update.update_fee.as_ref().unwrap());
+       nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &alice_fee_update.commitment_signed);
+       check_added_monitors!(&nodes[1], 1);
+
+       // This will prompt Bob (nodes[1]) to respond with his `CommitmentSigned` and `RevokeAndACK`.
+       let (bob_revoke_and_ack, bob_commitment_signed) = get_revoke_commit_msgs!(&nodes[1], nodes[0].node.get_our_node_id());
+       nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bob_revoke_and_ack);
+       check_added_monitors!(&nodes[0], 1);
+       nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bob_commitment_signed);
+       check_added_monitors(&nodes[0], 1);
+
+       // Alice then needs to send her final `RevokeAndACK` to complete the commitment dance. We
+       // pretend Bob hasn't received the message and check whether he'll disconnect Alice after
+       // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       let alice_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+       check_disconnect(&nodes[1]);
+
+       // Now, we'll reconnect them to test awaiting a `ChannelReestablish` message.
+       //
+       // Note that since the commitment dance didn't complete above, Alice is expected to resend her
+       // final `RevokeAndACK` to Bob to complete it.
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
+       let bob_init = msgs::Init {
+               features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+       };
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &bob_init, true).unwrap();
+       let alice_init = msgs::Init {
+               features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+       };
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &alice_init, true).unwrap();
+
+       // Upon reconnection, Alice sends her `ChannelReestablish` to Bob. Alice, however, hasn't
+       // received Bob's yet, so she should disconnect him after reaching
+       // `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       let alice_channel_reestablish = get_event_msg!(
+               nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()
+       );
+       nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &alice_channel_reestablish);
+       check_disconnect(&nodes[0]);
+
+       // Bob now sends his `ChannelReestablish` to Alice to resume the channel and consider it "live".
+       let bob_channel_reestablish = nodes[1].node.get_and_clear_pending_msg_events().iter().find_map(|event|
+               if let MessageSendEvent::SendChannelReestablish { node_id, msg } = event {
+                       assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                       Some(msg.clone())
+               } else {
+                       None
+               }
+       ).unwrap();
+       nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bob_channel_reestablish);
+
+       // Sanity check that Alice won't disconnect Bob since she's no longer waiting for any messages.
+       for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+               nodes[0].node.timer_tick_occurred();
+               check_disconnect_event(&nodes[0], false);
+       }
+
+       // However, Bob is still waiting on Alice's `RevokeAndACK`, so he should disconnect her after
+       // reaching `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`.
+       check_disconnect(&nodes[1]);
+
+       // Finally, have Bob process the last message.
+       nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &alice_revoke_and_ack);
+       check_added_monitors(&nodes[1], 1);
+
+       // At this point, neither node should attempt to disconnect each other, since they aren't
+       // waiting on any messages.
+       for node in &nodes {
+               for _ in 0..DISCONNECT_PEER_AWAITING_RESPONSE_TICKS {
+                       node.node.timer_tick_occurred();
+                       check_disconnect_event(node, false);
+               }
+       }
+}
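
For reference, the constant driving this test lives in `ln::channel` and is imported
at the top of this diff; its assumed definition:

    // Assumed definition in ln/channel.rs (believed to be 2 ticks at this point):
    pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
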