Merge pull request #208 from TheBlueMatt/2018-10-reconnect-fixes
diff --git a/src/ln/channelmanager.rs b/src/ln/channelmanager.rs
index bebb4e776853f630224ad053e9beac1a1928a537..9cca16a4278e0b66a2abf22bec64c46da656f237 100644
@@ -467,7 +467,7 @@ impl ChannelManager {
        /// pending HTLCs, the channel will be closed on chain.
        ///
        /// May generate a SendShutdown event on success, which should be relayed.
-       pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), HandleError> {
+       pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let (mut res, node_id, chan_option) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = channel_state_lock.borrow_parts();
@@ -481,7 +481,7 @@ impl ChannelManager {
                                                (res, chan_entry.get().get_their_node_id(), Some(chan_entry.remove_entry().1))
                                        } else { (res, chan_entry.get().get_their_node_id(), None) }
                                },
-                               hash_map::Entry::Vacant(_) => return Err(HandleError{err: "No such channel", action: None})
+                               hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
                        }
                };
                for htlc_source in res.1.drain(..) {
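For API consumers the visible change here is the error type. A minimal sketch of an adapted call site, assuming `manager: ChannelManager` and `channel_id: [u8; 32]` are in scope and that `APIError` lives in `util::errors` and implements `Debug` (assumptions, not shown in this diff):

    match manager.close_channel(&channel_id) {
        Ok(()) => { /* a SendShutdown event may now be pending for relay */ },
        Err(APIError::ChannelUnavailable { err }) => eprintln!("cannot close: {}", err),
        // any other variants are assumed; Debug formatting covers them
        Err(e) => eprintln!("close_channel failed: {:?}", e),
    }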
@@ -1525,7 +1525,8 @@ impl ChannelManager {
                                                //TODO: see issue #153, need consistent handling of obnoxious behavior from a random node
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
                                        }
-                                       chan.accept_channel(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                       chan.accept_channel(&msg)
+                                               .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.temporary_channel_id))?;
                                        (chan.get_value_satoshis(), chan.get_funding_redeemscript().to_v0_p2wsh(), chan.get_user_id())
                                },
                                //TODO: same as above
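The same substitution recurs in the hunks below for funding_locked, update_fulfill_htlc, update_fail_htlc, update_fail_malformed_htlc, channel announcements and channel_reestablish. The generic shape, with `do_channel_op` a hypothetical stand-in for any of those channel methods:

    // Unlike from_maybe_close, the channel id travels with the error, so any
    // resulting close targets only the offending (possibly still pending) channel.
    chan.do_channel_op(&msg)
        .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;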
@@ -1615,7 +1616,8 @@ impl ChannelManager {
                                        //TODO: here and below MsgHandleErrInternal, #153 case
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               chan.funding_locked(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                               chan.funding_locked(&msg)
+                                       .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
                                return Ok(self.get_announcement_sigs(chan));
                        },
                        None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
@@ -1735,7 +1737,8 @@ impl ChannelManager {
                                        //TODO: here and below MsgHandleErrInternal, #153 case
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               chan.update_fulfill_htlc(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?.clone()
+                               chan.update_fulfill_htlc(&msg)
+                                       .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?.clone()
                        },
                        None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
                };
@@ -1751,7 +1754,8 @@ impl ChannelManager {
                                        //TODO: here and below MsgHandleErrInternal, #153 case
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))
+                               chan.update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() })
+                                       .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))
                        },
                        None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
                }?;
@@ -1823,7 +1827,11 @@ impl ChannelManager {
                                        //TODO: here and below MsgHandleErrInternal, #153 case
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                }
-                               chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                               if (msg.failure_code & 0x8000) == 0 {
+                                       return Err(MsgHandleErrInternal::send_err_msg_close_chan("Got update_fail_malformed_htlc with BADONION not set", msg.channel_id));
+                               }
+                               chan.update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() })
+                                       .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
                                Ok(())
                        },
                        None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
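The new `0x8000` test encodes the BOLT 4 failure-code flag bits. A small self-contained sketch of those flags and the invariant enforced above (constant values per the spec; names are conventional, not taken from this file):

    const BADONION: u16 = 0x8000; // the onion was unparsable by some hop
    const PERM: u16 = 0x4000;     // permanent failure
    const NODE: u16 = 0x2000;     // node-level, not channel-level, failure
    const UPDATE: u16 = 0x1000;   // the failure carries a fresh channel_update

    // BOLT 2 requires update_fail_malformed_htlc to carry BADONION; a message
    // without it is a protocol violation, hence the channel close above.
    fn malformed_failure_code_is_valid(failure_code: u16) -> bool {
        failure_code & BADONION != 0
    }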
@@ -1931,7 +1939,7 @@ impl ChannelManager {
 
                                        let our_node_id = self.get_our_node_id();
                                        let (announcement, our_bitcoin_sig) = chan.get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone())
-                                               .map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                               .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
 
                                        let were_node_one = announcement.node_id_1 == our_node_id;
                                        let msghash = Message::from_slice(&Sha256dHash::from_data(&announcement.encode()[..])[..]).unwrap();
@@ -1965,7 +1973,8 @@ impl ChannelManager {
                                        if chan.get_their_node_id() != *their_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                       let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg)
+                                               .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
                                        (Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor)
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
@@ -2556,7 +2565,17 @@ mod tests {
        }
 
        fn create_chan_between_nodes(node_a: &Node, node_b: &Node) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
-               node_a.node.create_channel(node_b.node.get_our_node_id(), 100000, 10001, 42).unwrap();
+               create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001)
+       }
+
+       fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+               let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat);
+               let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
+               (announcement, as_update, bs_update, channel_id, tx)
+       }
+
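+       // Splitting the handshake in two lets a test stop after step (a): e.g.
+       // do_test_drop_messages_peer_disconnect(0) below opens a channel without
+       // delivering node_a's funding_locked, relying on reconnect handling to
+       // re-send it instead of running step (b).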
+       fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
+               node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
 
                let events_1 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_1.len(), 1);
@@ -2578,7 +2597,7 @@ mod tests {
                assert_eq!(events_2.len(), 1);
                match events_2[0] {
                        Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
-                               assert_eq!(*channel_value_satoshis, 100000);
+                               assert_eq!(*channel_value_satoshis, channel_value);
                                assert_eq!(user_channel_id, 42);
 
                                tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
@@ -2628,47 +2647,53 @@ mod tests {
                        _ => panic!("Unexpected event"),
                };
 
-               confirm_transaction(&node_a.chain_monitor, &tx, chan_id);
-               let events_5 = node_a.node.get_and_clear_pending_events();
+               confirm_transaction(&node_b.chain_monitor, &tx, chan_id);
+               let events_5 = node_b.node.get_and_clear_pending_events();
                assert_eq!(events_5.len(), 1);
                match events_5[0] {
                        Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
-                               assert_eq!(*node_id, node_b.node.get_our_node_id());
+                               assert_eq!(*node_id, node_a.node.get_our_node_id());
                                assert!(announcement_sigs.is_none());
-                               node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), msg).unwrap()
+                               node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap()
                        },
                        _ => panic!("Unexpected event"),
                };
 
                let channel_id;
 
-               confirm_transaction(&node_b.chain_monitor, &tx, chan_id);
-               let events_6 = node_b.node.get_and_clear_pending_events();
+               confirm_transaction(&node_a.chain_monitor, &tx, chan_id);
+               let events_6 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_6.len(), 1);
-               let as_announcement_sigs = match events_6[0] {
+               (match events_6[0] {
                        Event::SendFundingLocked { ref node_id, ref msg, ref announcement_sigs } => {
-                               assert_eq!(*node_id, node_a.node.get_our_node_id());
                                channel_id = msg.channel_id.clone();
-                               let as_announcement_sigs = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), msg).unwrap().unwrap();
-                               node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &(*announcement_sigs).clone().unwrap()).unwrap();
-                               as_announcement_sigs
+                               assert_eq!(*node_id, node_b.node.get_our_node_id());
+                               (msg.clone(), announcement_sigs.clone().unwrap())
                        },
                        _ => panic!("Unexpected event"),
+               }, channel_id, tx)
+       }
+
+       fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
+               let bs_announcement_sigs = {
+                       let bs_announcement_sigs = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap().unwrap();
+                       node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap();
+                       bs_announcement_sigs
                };
 
-               let events_7 = node_a.node.get_and_clear_pending_events();
+               let events_7 = node_b.node.get_and_clear_pending_events();
                assert_eq!(events_7.len(), 1);
-               let (announcement, as_update) = match events_7[0] {
+               let (announcement, bs_update) = match events_7[0] {
                        Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
                                (msg, update_msg)
                        },
                        _ => panic!("Unexpected event"),
                };
 
-               node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_announcement_sigs).unwrap();
-               let events_8 = node_b.node.get_and_clear_pending_events();
+               node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap();
+               let events_8 = node_a.node.get_and_clear_pending_events();
                assert_eq!(events_8.len(), 1);
-               let bs_update = match events_8[0] {
+               let as_update = match events_8[0] {
                        Event::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
                                assert!(*announcement == *msg);
                                update_msg
@@ -2678,11 +2703,15 @@ mod tests {
 
                *node_a.network_chan_count.borrow_mut() += 1;
 
-               ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone(), channel_id, tx)
+               ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
        }
 
        fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
-               let chan_announcement = create_chan_between_nodes(&nodes[a], &nodes[b]);
+               create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001)
+       }
+
+       fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+               let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat);
                for node in nodes {
                        assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
                        node.router.handle_channel_update(&chan_announcement.1).unwrap();
@@ -3870,6 +3899,281 @@ mod tests {
                }
        }
 
+       #[test]
+       fn channel_reserve_test() {
+               use util::rng;
+               use std::sync::atomic::Ordering;
+               use ln::msgs::HandleError;
+
+               macro_rules! get_channel_value_stat {
+                       ($node: expr, $channel_id: expr) => {{
+                               let chan_lock = $node.node.channel_state.lock().unwrap();
+                               let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+                               chan.get_value_stat()
+                       }}
+               }
+
+               let mut nodes = create_network(3);
+               let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001);
+               let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001);
+
+               let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
+               let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
+
+               let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
+               let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
+
+               macro_rules! get_route_and_payment_hash {
+                       ($recv_value: expr) => {{
+                               let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
+                               let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+                               (route, payment_hash, payment_preimage)
+                       }}
+               };
+
+               macro_rules! expect_pending_htlcs_forwardable {
+                       ($node: expr) => {{
+                               let events = $node.node.get_and_clear_pending_events();
+                               assert_eq!(events.len(), 1);
+                               match events[0] {
+                                       Event::PendingHTLCsForwardable { .. } => { },
+                                       _ => panic!("Unexpected event"),
+                               };
+                               $node.node.channel_state.lock().unwrap().next_forward = Instant::now();
+                               $node.node.process_pending_htlc_forwards();
+                       }}
+               };
+
+               macro_rules! expect_forward {
+                       ($node: expr) => {{
+                               let mut events = $node.node.get_and_clear_pending_events();
+                               assert_eq!(events.len(), 1);
+                               check_added_monitors!($node, 1);
+                               let payment_event = SendEvent::from_event(events.remove(0));
+                               payment_event
+                       }}
+               }
+
+               macro_rules! expect_payment_received {
+                       ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
+                               let events = $node.node.get_and_clear_pending_events();
+                               assert_eq!(events.len(), 1);
+                               match events[0] {
+                                       Event::PaymentReceived { ref payment_hash, amt } => {
+                                               assert_eq!($expected_payment_hash, *payment_hash);
+                                               assert_eq!($expected_recv_value, amt);
+                                       },
+                                       _ => panic!("Unexpected event"),
+                               }
+                       }
+               };
+
+               let feemsat = 239; // expected per-hop routing fee (asserted against the route below)
+               let total_fee_msat = (nodes.len() - 2) as u64 * feemsat;
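+               // With create_network(3) there is exactly one forwarding hop, so
+               // total_fee_msat = (3 - 2) * feemsat = 239 msat for every payment below.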
+
+               let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
+
+               // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
+               {
+                       let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
+                       assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
+                       let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
+                       match err {
+                               APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
+                               _ => panic!("Unknown error variant"),
+                       }
+               }
+
+               let mut htlc_id = 0;
+               // The channel reserve is larger than their_max_htlc_value_in_flight_msat,
+               // so loop full-size payments to drain nodes[0] down toward its reserve
+               loop {
+                       let amt_msat = recv_value_0 + total_fee_msat;
+                       if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
+                               break;
+                       }
+                       send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
+                       htlc_id += 1;
+
+                       let (stat01_, stat11_, stat12_, stat22_) = (
+                               get_channel_value_stat!(nodes[0], chan_1.2),
+                               get_channel_value_stat!(nodes[1], chan_1.2),
+                               get_channel_value_stat!(nodes[1], chan_2.2),
+                               get_channel_value_stat!(nodes[2], chan_2.2),
+                       );
+
+                       assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
+                       assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
+                       assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
+                       assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
+                       stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
+               }
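+               // Net effect per iteration: nodes[0] pays out amt_msat, nodes[1] keeps
+               // feemsat as its forwarding fee, and nodes[2] receives recv_value_0;
+               // the four assertions above check exactly that, hop by hop.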
+
+               {
+                       let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
+                       // attempt to get channel_reserve violation
+                       let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
+                       let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
+                       match err {
+                               APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+                               _ => panic!("Unknown error variant"),
+                       }
+               }
+
+               // add an HTLC that stays pending (its commitment dance is deferred)
+               let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
+               let amt_msat_1 = recv_value_1 + total_fee_msat;
+
+               let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
+               let payment_event_1 = {
+                       nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
+                       check_added_monitors!(nodes[0], 1);
+
+                       let mut events = nodes[0].node.get_and_clear_pending_events();
+                       assert_eq!(events.len(), 1);
+                       SendEvent::from_event(events.remove(0))
+               };
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
+
+               // channel reserve test with htlc pending output > 0
+               let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
+               {
+                       let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+                       match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+                               APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+                               _ => panic!("Unknown error variant"),
+                       }
+               }
+
+               {
+                       // test the channel_reserve check on the receiving (nodes[1]) side
+                       let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+
+                       // Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
+                       let secp_ctx = Secp256k1::new();
+                       let session_priv = SecretKey::from_slice(&secp_ctx, &{
+                               let mut session_key = [0; 32];
+                               rng::fill_bytes(&mut session_key);
+                               session_key
+                       }).expect("RNG is bad!");
+
+                       let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+                       let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+                       let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height).unwrap();
+                       let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+                       let msg = msgs::UpdateAddHTLC {
+                               channel_id: chan_1.2,
+                               htlc_id,
+                               amount_msat: htlc_msat,
+                               payment_hash: our_payment_hash,
+                               cltv_expiry: htlc_cltv,
+                               onion_routing_packet: onion_packet,
+                       };
+
+                       let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
+                       match err {
+                               HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+                       }
+               }
+
+               // split the remainder into two payments to exercise the holding cell
+               let recv_value_21 = recv_value_2/2;
+               let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
+               {
+                       let stat = get_channel_value_stat!(nodes[0], chan_1.2);
+                       assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
+               }
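+               // i.e. once both remaining payments (plus their fees) complete, nodes[0]
+               // will sit exactly at channel_reserve_msat, which the end of the test
+               // re-asserts against stat0.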
+
+               // now see if they go through on both sides
+               let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
+               // but this one will get stuck in the holding cell
+               nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
+               check_added_monitors!(nodes[0], 0);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 0);
+
+               // test with outbound holding cell amount > 0
+               {
+                       let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
+                       match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+                               APIError::RouteError{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+                               _ => panic!("Unknown error variant"),
+                       }
+               }
+
+               let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
+               // this one will also get stuck in the holding cell
+               nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
+               check_added_monitors!(nodes[0], 0);
+               let events = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 0);
+
+               // flush the pending htlc
+               let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
+               check_added_monitors!(nodes[1], 1);
+
+               let commitment_update_2 = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().unwrap();
+               check_added_monitors!(nodes[0], 1);
+               let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
+               assert!(bs_none.is_none());
+               check_added_monitors!(nodes[0], 1);
+               assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+               check_added_monitors!(nodes[1], 1);
+
+               expect_pending_htlcs_forwardable!(nodes[1]);
+
+               let ref payment_event_11 = expect_forward!(nodes[1]);
+               nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
+               commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
+
+               expect_pending_htlcs_forwardable!(nodes[2]);
+               expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);
+
+               // flush the htlcs in the holding cell
+               assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
+               commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
+               expect_pending_htlcs_forwardable!(nodes[1]);
+
+               let ref payment_event_3 = expect_forward!(nodes[1]);
+               assert_eq!(payment_event_3.msgs.len(), 2);
+               nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
+               nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
+
+               commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
+               expect_pending_htlcs_forwardable!(nodes[2]);
+
+               let events = nodes[2].node.get_and_clear_pending_events();
+               assert_eq!(events.len(), 2);
+               match events[0] {
+                       Event::PaymentReceived { ref payment_hash, amt } => {
+                               assert_eq!(our_payment_hash_21, *payment_hash);
+                               assert_eq!(recv_value_21, amt);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+               match events[1] {
+                       Event::PaymentReceived { ref payment_hash, amt } => {
+                               assert_eq!(our_payment_hash_22, *payment_hash);
+                               assert_eq!(recv_value_22, amt);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
+               claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
+               claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
+               claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
+
+               let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
+               let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
+               assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
+               assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);
+
+               let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
+               assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
+       }
+
        #[test]
        fn channel_monitor_network_test() {
                // Simple test which builds a network of ChannelManagers, connects them to each other, and
@@ -4370,7 +4674,9 @@ mod tests {
                assert_eq!(channel_state.short_to_id.len(), 0);
        }
 
-       fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_claims: (usize, usize), pending_htlc_fails: (usize, usize)) {
+       /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
+       /// for claims/fails they are separated out.
+       fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
                let reestablish_1 = node_a.node.peer_connected(&node_b.node.get_our_node_id());
                let reestablish_2 = node_b.node.peer_connected(&node_a.node.get_our_node_id());
 
@@ -4378,7 +4684,7 @@ mod tests {
                for msg in reestablish_1 {
                        resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap());
                }
-               if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 {
+               if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
                        check_added_monitors!(node_b, 1);
                } else {
                        check_added_monitors!(node_b, 0);
@@ -4388,29 +4694,43 @@ mod tests {
                for msg in reestablish_2 {
                        resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap());
                }
-               if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 {
+               if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
                        check_added_monitors!(node_a, 1);
                } else {
                        check_added_monitors!(node_a, 0);
                }
 
                // We don't yet support both needing updates, as that would require a different commitment dance:
-               assert!((pending_htlc_claims.0 == 0 && pending_htlc_fails.0 == 0) || (pending_htlc_claims.1 == 0 && pending_htlc_fails.1 == 0));
+               assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
+                       (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
 
                for chan_msgs in resp_1.drain(..) {
                        if pre_all_htlcs {
-                               let _announcement_sigs_opt = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
+                               let funding_locked_res = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap());
+                               let _announcement_sigs_opt = funding_locked_res.unwrap();
                                //TODO: Test announcement_sigs re-sending when we've implemented it
                        } else {
                                assert!(chan_msgs.0.is_none());
                        }
-                       assert!(chan_msgs.1.is_none());
-                       if pending_htlc_claims.0 != 0 || pending_htlc_fails.0 != 0 {
+                       if pending_raa.0 {
+                               assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
+                               check_added_monitors!(node_a, 1);
+                       } else {
+                               assert!(chan_msgs.1.is_none());
+                       }
+                       if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
                                let commitment_update = chan_msgs.2.unwrap();
-                               assert!(commitment_update.update_add_htlcs.is_empty()); // We can't relay while disconnected
-                               assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0);
-                               assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0);
+                               if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+                                       assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
+                               } else {
+                                       assert!(commitment_update.update_add_htlcs.is_empty());
+                               }
+                               assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
+                               assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
                                assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+                               for update_add in commitment_update.update_add_htlcs {
+                                       node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
+                               }
                                for update_fulfill in commitment_update.update_fulfill_htlcs {
                                        node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
                                }
@@ -4418,7 +4738,15 @@ mod tests {
                                        node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
                                }
 
-                               commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
+                               if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+                                       commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
+                               } else {
+                                       let (as_revoke_and_ack, as_commitment_signed) = node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+                                       check_added_monitors!(node_a, 1);
+                                       assert!(as_commitment_signed.is_none());
+                                       assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+                                       check_added_monitors!(node_b, 1);
+                               }
                        } else {
                                assert!(chan_msgs.2.is_none());
                        }
@@ -4431,13 +4759,23 @@ mod tests {
                        } else {
                                assert!(chan_msgs.0.is_none());
                        }
-                       assert!(chan_msgs.1.is_none());
-                       if pending_htlc_claims.1 != 0 || pending_htlc_fails.1 != 0 {
+                       if pending_raa.1 {
+                               assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
+                               check_added_monitors!(node_b, 1);
+                       } else {
+                               assert!(chan_msgs.1.is_none());
+                       }
+                       if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
                                let commitment_update = chan_msgs.2.unwrap();
-                               assert!(commitment_update.update_add_htlcs.is_empty()); // We can't relay while disconnected
-                               assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0);
-                               assert_eq!(commitment_update.update_fail_htlcs.len(), pending_htlc_fails.0);
+                               if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+                                       assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
+                               } else {
+                                       assert!(commitment_update.update_add_htlcs.is_empty());
+                               }
+                               assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
+                               assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
                                assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+                               for update_add in commitment_update.update_add_htlcs {
+                                       node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
+                               }
                                for update_fulfill in commitment_update.update_fulfill_htlcs {
                                        node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
                                }
@@ -4445,7 +4783,15 @@ mod tests {
                                        node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
                                }
 
-                               commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
+                               if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+                                       commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
+                               } else {
+                                       let (bs_revoke_and_ack, bs_commitment_signed) = node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+                                       check_added_monitors!(node_b, 1);
+                                       assert!(bs_commitment_signed.is_none());
+                                       assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+                                       check_added_monitors!(node_a, 1);
+                               }
                        } else {
                                assert!(chan_msgs.2.is_none());
                        }
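The widened signature is easiest to read off a call site. A sketch mirroring a call made in the tests below; tuples are (node_a side, node_b side), and `-1` in pending_htlc_adds requests the bare response commitment_signed path rather than re-sent update_add_htlcs:

    // no funding_locked re-send, a response commitment_signed owed to node_a,
    // no claims or fails in either bucket, and a pending RAA for node_a:
    reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));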
@@ -4461,7 +4807,7 @@ mod tests {
 
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0));
+               reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
                let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
                let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -4470,7 +4816,7 @@ mod tests {
 
                nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
                nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0));
+               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
                let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
                let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -4483,7 +4829,7 @@ mod tests {
                claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
                fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
 
-               reconnect_nodes(&nodes[0], &nodes[1], false, (1, 0), (1, 0));
+               reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
                {
                        let events = nodes[0].node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 2);
@@ -4505,6 +4851,193 @@ mod tests {
                fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
        }
 
+       fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
+               // Test that we can reconnect when in-flight HTLC updates get dropped
+               let mut nodes = create_network(2);
+               if messages_delivered == 0 {
+                       create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
+                       // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
+               } else {
+                       create_announced_chan_between_nodes(&nodes, 0, 1);
+               }
+
+               let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+               let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+
+               let payment_event = {
+                       nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
+                       check_added_monitors!(nodes[0], 1);
+
+                       let mut events = nodes[0].node.get_and_clear_pending_events();
+                       assert_eq!(events.len(), 1);
+                       SendEvent::from_event(events.remove(0))
+               };
+               assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
+
+               if messages_delivered < 2 {
+                       // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
+               } else {
+                       nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+                       let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+                       check_added_monitors!(nodes[1], 1);
+
+                       if messages_delivered >= 3 {
+                               assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+                               check_added_monitors!(nodes[0], 1);
+
+                               if messages_delivered >= 4 {
+                                       let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed.unwrap()).unwrap();
+                                       assert!(as_commitment_signed.is_none());
+                                       check_added_monitors!(nodes[0], 1);
+
+                                       if messages_delivered >= 5 {
+                                               assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+                                               check_added_monitors!(nodes[1], 1);
+                                       }
+                               }
+                       }
+               }
+
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
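+               // How far the add-direction dance got before this disconnect: for 0 or 1
+               // nothing was delivered (0 also withheld funding_locked above); 2 means
+               // update_add_htlc plus commitment_signed landed, 3 adds the RAA, 4 the
+               // response commitment_signed, and 5 the final RAA.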
+               if messages_delivered < 2 {
+                       // Even if the funding_locked messages get exchanged, as long as nothing further was
+                       // received on either side, both sides will need to resend them.
+                       reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
+               } else if messages_delivered == 2 {
+                       // nodes[0] still wants its RAA + commitment_signed
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
+               } else if messages_delivered == 3 {
+                       // nodes[0] still wants its commitment_signed
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               } else if messages_delivered == 4 {
+                       // nodes[1] still wants its final RAA
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+               } else if messages_delivered == 5 {
+                       // Everything was delivered...
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               }
+
+               let events_1 = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(events_1.len(), 1);
+               match events_1[0] {
+                       Event::PendingHTLCsForwardable { .. } => { },
+                       _ => panic!("Unexpected event"),
+               };
+
+               nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+               nodes[1].node.process_pending_htlc_forwards();
+
+               let events_2 = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(events_2.len(), 1);
+               match events_2[0] {
+                       Event::PaymentReceived { ref payment_hash, amt } => {
+                               assert_eq!(payment_hash_1, *payment_hash);
+                               assert_eq!(amt, 1000000);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
+               nodes[1].node.claim_funds(payment_preimage_1);
+               check_added_monitors!(nodes[1], 1);
+
+               let events_3 = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(events_3.len(), 1);
+               let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
+                       Event::UpdateHTLCs { ref node_id, ref updates } => {
+                               assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                               assert!(updates.update_add_htlcs.is_empty());
+                               assert!(updates.update_fail_htlcs.is_empty());
+                               assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+                               assert!(updates.update_fail_malformed_htlcs.is_empty());
+                               assert!(updates.update_fee.is_none());
+                               (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
+                       },
+                       _ => panic!("Unexpected event"),
+               };
+
+               if messages_delivered >= 1 {
+                       nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap();
+
+                       let events_4 = nodes[0].node.get_and_clear_pending_events();
+                       assert_eq!(events_4.len(), 1);
+                       match events_4[0] {
+                               Event::PaymentSent { ref payment_preimage } => {
+                                       assert_eq!(payment_preimage_1, *payment_preimage);
+                               },
+                               _ => panic!("Unexpected event"),
+                       }
+
+                       if messages_delivered >= 2 {
+                               let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
+                               check_added_monitors!(nodes[0], 1);
+
+                               if messages_delivered >= 3 {
+                                       assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+                                       check_added_monitors!(nodes[1], 1);
+
+                                       if messages_delivered >= 4 {
+                                               let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
+                                               assert!(bs_commitment_signed.is_none());
+                                               check_added_monitors!(nodes[1], 1);
+
+                                               if messages_delivered >= 5 {
+                                                       assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+                                                       check_added_monitors!(nodes[0], 1);
+                                               }
+                                       }
+                               }
+                       }
+               }
+
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
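+               // Same staging for the claim direction: >= 1 delivered the
+               // update_fulfill_htlc, >= 2 its commitment_signed, and so on, so the
+               // reconnect below re-derives whatever remained undelivered.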
+               if messages_delivered < 2 {
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+                       //TODO: Deduplicate PaymentSent events, then enable this if:
+                       //if messages_delivered < 1 {
+                               let events_4 = nodes[0].node.get_and_clear_pending_events();
+                               assert_eq!(events_4.len(), 1);
+                               match events_4[0] {
+                                       Event::PaymentSent { ref payment_preimage } => {
+                                               assert_eq!(payment_preimage_1, *payment_preimage);
+                                       },
+                                       _ => panic!("Unexpected event"),
+                               }
+                       //}
+               } else if messages_delivered == 2 {
+                       // nodes[0] still wants its RAA + commitment_signed
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
+               } else if messages_delivered == 3 {
+                       // nodes[0] still wants its commitment_signed
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
+               } else if messages_delivered == 4 {
+                       // nodes[1] still wants its final RAA
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+               } else if messages_delivered == 5 {
+                       // Everything was delivered...
+                       reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+               }
+
+               // Channel should still work fine...
+               let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
+               claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+       }
+
+       #[test]
+       fn test_drop_messages_peer_disconnect_a() {
+               do_test_drop_messages_peer_disconnect(0);
+               do_test_drop_messages_peer_disconnect(1);
+               do_test_drop_messages_peer_disconnect(2);
+       }
+
+       #[test]
+       fn test_drop_messages_peer_disconnect_b() {
+               do_test_drop_messages_peer_disconnect(3);
+               do_test_drop_messages_peer_disconnect(4);
+               do_test_drop_messages_peer_disconnect(5);
+       }
+
        #[test]
        fn test_invalid_channel_announcement() {
                // Test the BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs