Move channel monitor updates inside the channel_state lock
[rust-lightning] src/ln/channelmanager.rs
index 7389a678bee3b5a09971c18c8dd4bf590a9213e6..dd6c7357cb1a88be312e95f56a9587ec9e881a7f 100644
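
Every hunk below applies the same refactor: where a Channel method hands back an updated ChannelMonitor, the call to ManyChannelMonitor::add_update_monitor now happens while the channel_state lock is still held, instead of after the lock is released, except on the paths that (as the comments in the diff note) have exclusive ownership of the channel. The sketch below shows the target shape with simplified stand-in types; Manager, Chan, Monitor, MonitorStore and some_update are illustrative names only, not the rust-lightning API.

    use std::collections::HashMap;
    use std::sync::Mutex;

    // Toy stand-ins; these are NOT the real rust-lightning types.
    struct Monitor { funding_txo: u64 }

    struct Chan { funding_txo: u64 }
    impl Chan {
        // Stand-in for Channel methods such as send_htlc_and_commit, commitment_signed,
        // or revoke_and_ack, which return a message plus an updated monitor snapshot.
        fn some_update(&mut self) -> (String, Monitor) {
            ("msg".to_string(), Monitor { funding_txo: self.funding_txo })
        }
    }

    struct MonitorStore { monitors: Mutex<HashMap<u64, Monitor>> }
    impl MonitorStore {
        // Stand-in for ManyChannelMonitor::add_update_monitor.
        fn add_update_monitor(&self, key: u64, monitor: Monitor) -> Result<(), ()> {
            self.monitors.lock().unwrap().insert(key, monitor);
            Ok(())
        }
    }

    struct Manager {
        channel_state: Mutex<HashMap<u64, Chan>>,
        monitor: MonitorStore,
    }

    impl Manager {
        fn handle_update(&self, channel_id: u64) -> Option<String> {
            let msg = {
                let mut channel_state = self.channel_state.lock().unwrap();
                let chan = channel_state.get_mut(&channel_id)?;
                let (msg, chan_monitor) = chan.some_update();
                // The pattern this patch adopts: register the monitor update before the
                // channel_state lock drops, so no other handler can act on the channel
                // ahead of its monitor.
                if self.monitor.add_update_monitor(chan_monitor.funding_txo, chan_monitor).is_err() {
                    unimplemented!();
                }
                msg
            }; // lock released only after the monitor update has landed
            Some(msg)
        }
    }

    fn main() {
        let mgr = Manager {
            channel_state: Mutex::new(HashMap::from([(1, Chan { funding_txo: 42 })])),
            monitor: MonitorStore { monitors: Mutex::new(HashMap::new()) },
        };
        assert_eq!(mgr.handle_update(1).as_deref(), Some("msg"));
    }

The same shape repeats across the handlers below (send_htlc_and_commit, get_update_fail_htlc_and_commit, get_update_fulfill_htlc_and_commit, funding_signed, commitment_signed, revoke_and_ack, channel_reestablish); the ChannelMonitor no longer has to be threaded out through return tuples and registered after the lock is dropped.
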
@@ -26,7 +26,7 @@ use ln::channel::{Channel, ChannelError, ChannelKeys};
 use ln::channelmonitor::{ManyChannelMonitor, CLTV_CLAIM_BUFFER, HTLC_FAIL_TIMEOUT_BLOCKS};
 use ln::router::{Route,RouteHop};
 use ln::msgs;
-use ln::msgs::{HandleError,ChannelMessageHandler};
+use ln::msgs::{ChannelMessageHandler, HandleError, RAACommitmentOrder};
 use util::{byte_utils, events, internal_traits, rng};
 use util::sha2::Sha256;
 use util::ser::{Readable, Writeable};
@@ -1096,7 +1096,7 @@ impl ChannelManager {
                let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height)?;
                let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
 
-               let (first_hop_node_id, (update_add, commitment_signed, chan_monitor)) = {
+               let (first_hop_node_id, update_add, commitment_signed) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = channel_state_lock.borrow_parts();
 
@@ -1113,25 +1113,31 @@ impl ChannelManager {
                                if !chan.is_live() {
                                        return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected!"});
                                }
-                               chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+                               match chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
                                        route: route.clone(),
                                        session_priv: session_priv.clone(),
                                        first_hop_htlc_msat: htlc_msat,
-                               }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})?
+                               }, onion_packet).map_err(|he| APIError::ChannelUnavailable{err: he.err})? {
+                                       Some((update_add, commitment_signed, chan_monitor)) => {
+                                               if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                                       unimplemented!();
+                                               }
+                                               Some((update_add, commitment_signed))
+                                       },
+                                       None => None,
+                               }
                        };
 
                        let first_hop_node_id = route.hops.first().unwrap().pubkey;
 
                        match res {
-                               Some(msgs) => (first_hop_node_id, msgs),
+                               Some((update_add, commitment_signed)) => {
+                                       (first_hop_node_id, update_add, commitment_signed)
+                               },
                                None => return Ok(()),
                        }
                };
 
-               if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                       unimplemented!();
-               }
-
                let mut events = self.pending_events.lock().unwrap();
                events.push(events::Event::UpdateHTLCs {
                        node_id: first_hop_node_id,
@@ -1184,7 +1190,9 @@ impl ChannelManager {
                                },
                                None => return
                        }
-               }; // Release channel lock for install_watch_outpoint call,
+               };
+               // Because we have exclusive ownership of the channel here, we can release the
+               // channel_state lock before calling add_update_monitor.
                if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
                        unimplemented!();
                }
@@ -1299,7 +1307,10 @@ impl ChannelManager {
                                                                continue;
                                                        },
                                                };
-                                               new_events.push((Some(monitor), events::Event::UpdateHTLCs {
+                                               if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+                                                       unimplemented!(); // ...but definitely don't push the event
+                                               }
+                                               new_events.push(events::Event::UpdateHTLCs {
                                                        node_id: forward_chan.get_their_node_id(),
                                                        updates: msgs::CommitmentUpdate {
                                                                update_add_htlcs: add_htlc_msgs,
@@ -1309,7 +1320,7 @@ impl ChannelManager {
                                                                update_fee: None,
                                                                commitment_signed: commitment_msg,
                                                        },
-                                               }));
+                                               });
                                        }
                                } else {
                                        for HTLCForwardInfo { prev_short_channel_id, prev_htlc_id, forward_info } in pending_forwards.drain(..) {
@@ -1322,10 +1333,10 @@ impl ChannelManager {
                                                        hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(prev_hop_data),
                                                        hash_map::Entry::Vacant(entry) => { entry.insert(vec![prev_hop_data]); },
                                                };
-                                               new_events.push((None, events::Event::PaymentReceived {
+                                               new_events.push(events::Event::PaymentReceived {
                                                        payment_hash: forward_info.payment_hash,
                                                        amt: forward_info.amt_to_forward,
-                                               }));
+                                               });
                                        }
                                }
                        }
@@ -1339,21 +1350,8 @@ impl ChannelManager {
                }
 
                if new_events.is_empty() { return }
-
-               new_events.retain(|event| {
-                       if let &Some(ref monitor) = &event.0 {
-                               if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor.clone()) {
-                                       unimplemented!();// but def dont push the event...
-                               }
-                       }
-                       true
-               });
-
                let mut events = self.pending_events.lock().unwrap();
-               events.reserve(new_events.len());
-               for event in new_events.drain(..) {
-                       events.push(event.1);
-               }
+               events.append(&mut new_events);
        }
 
        /// Indicates that the preimage for payment_hash is unknown after a PaymentReceived event.
@@ -1381,11 +1379,21 @@ impl ChannelManager {
                match source {
                        HTLCSource::OutboundRoute { .. } => {
                                mem::drop(channel_state);
-
-                               let mut pending_events = self.pending_events.lock().unwrap();
-                               pending_events.push(events::Event::PaymentFailed {
-                                       payment_hash: payment_hash.clone()
-                               });
+                               if let &HTLCFailReason::ErrorPacket { ref err } = &onion_error {
+                                       let (channel_update, payment_retryable) = self.process_onion_failure(&source, err.data.clone());
+                                       let mut pending_events = self.pending_events.lock().unwrap();
+                                       if let Some(channel_update) = channel_update {
+                                               pending_events.push(events::Event::PaymentFailureNetworkUpdate {
+                                                       update: channel_update,
+                                               });
+                                       }
+                                       pending_events.push(events::Event::PaymentFailed {
+                                               payment_hash: payment_hash.clone(),
+                                               rejected_by_dest: !payment_retryable,
+                                       });
+                               } else {
+                                       panic!("should have onion error packet here");
+                               }
                        },
                        HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
                                let err_packet = match onion_error {
@@ -1406,7 +1414,13 @@ impl ChannelManager {
 
                                        let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
                                        match chan.get_update_fail_htlc_and_commit(htlc_id, err_packet) {
-                                               Ok(msg) => (chan.get_their_node_id(), msg),
+                                               Ok(Some((msg, commitment_msg, chan_monitor))) => {
+                                                       if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                                               unimplemented!();
+                                                       }
+                                                       (chan.get_their_node_id(), Some((msg, commitment_msg)))
+                                               },
+                                               Ok(None) => (chan.get_their_node_id(), None),
                                                Err(_e) => {
                                                        //TODO: Do something with e?
                                                        return;
@@ -1415,13 +1429,9 @@ impl ChannelManager {
                                };
 
                                match fail_msgs {
-                                       Some((msg, commitment_msg, chan_monitor)) => {
+                                       Some((msg, commitment_msg)) => {
                                                mem::drop(channel_state);
 
-                                               if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                                                       unimplemented!();// but def dont push the event...
-                                               }
-
                                                let mut pending_events = self.pending_events.lock().unwrap();
                                                pending_events.push(events::Event::UpdateHTLCs {
                                                        node_id,
@@ -1486,7 +1496,13 @@ impl ChannelManager {
 
                                        let chan = channel_state.by_id.get_mut(&chan_id).unwrap();
                                        match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
-                                               Ok(msg) => (chan.get_their_node_id(), msg),
+                                               Ok((msgs, Some(chan_monitor))) => {
+                                                       if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                                               unimplemented!(); // ...but definitely don't push the event
+                                                       }
+                                                       (chan.get_their_node_id(), msgs)
+                                               },
+                                               Ok((msgs, None)) => (chan.get_their_node_id(), msgs),
                                                Err(_e) => {
                                                        // TODO: There is probably a channel manager somewhere that needs to
                                                        // learn the preimage as the channel may be about to hit the chain.
@@ -1497,13 +1513,7 @@ impl ChannelManager {
                                };
 
                                mem::drop(channel_state);
-                               if let Some(chan_monitor) = fulfill_msgs.1 {
-                                       if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                                               unimplemented!();// but def dont push the event...
-                                       }
-                               }
-
-                               if let Some((msg, commitment_msg)) = fulfill_msgs.0 {
+                               if let Some((msg, commitment_msg)) = fulfill_msgs {
                                        let mut pending_events = self.pending_events.lock().unwrap();
                                        pending_events.push(events::Event::UpdateHTLCs {
                                                node_id: node_id,
@@ -1616,10 +1626,9 @@ impl ChannelManager {
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
                        }
-               }; // Release channel lock for install_watch_outpoint call,
-                  // note that this means if the remote end is misbehaving and sends a message for the same
-                  // channel back-to-back with funding_created, we'll end up thinking they sent a message
-                  // for a bogus channel.
+               };
+               // Because we have exclusive ownership of the channel here, we can release the
+               // channel_state lock before calling add_update_monitor.
                if let Err(_e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
                        unimplemented!();
                }
@@ -1636,7 +1645,7 @@ impl ChannelManager {
        }
 
        fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-               let (funding_txo, user_id, monitor) = {
+               let (funding_txo, user_id) = {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        match channel_state.by_id.get_mut(&msg.channel_id) {
                                Some(chan) => {
@@ -1645,14 +1654,14 @@ impl ChannelManager {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
                                        let chan_monitor = chan.funding_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
-                                       (chan.get_funding_txo().unwrap(), chan.get_user_id(), chan_monitor)
+                                       if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                               unimplemented!();
+                                       }
+                                       (chan.get_funding_txo().unwrap(), chan.get_user_id())
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
                        }
                };
-               if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-                       unimplemented!();
-               }
                let mut pending_events = self.pending_events.lock().unwrap();
                pending_events.push(events::Event::FundingBroadcastSafe {
                        funding_txo: funding_txo,
@@ -1996,9 +2005,9 @@ impl ChannelManager {
                } else { ((None, true)) }
        }
 
-       fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, MsgHandleErrInternal> {
+       fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state = self.channel_state.lock().unwrap();
-               let htlc_source = match channel_state.by_id.get_mut(&msg.channel_id) {
+               match channel_state.by_id.get_mut(&msg.channel_id) {
                        Some(chan) => {
                                if chan.get_their_node_id() != *their_node_id {
                                        //TODO: here and below MsgHandleErrInternal, #153 case
@@ -2009,17 +2018,7 @@ impl ChannelManager {
                        },
                        None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
                }?;
-
-               // we are the origin node and update route information
-               // also determine if the payment is retryable
-               if let &HTLCSource::OutboundRoute { .. } = htlc_source {
-                       let (channel_update, _payment_retry) = self.process_onion_failure(htlc_source, msg.reason.data.clone());
-                       Ok(channel_update)
-                       // TODO: include pyament_retry info in PaymentFailed event that will be
-                       // fired when receiving revoke_and_ack
-               } else {
-                       Ok(None)
-               }
+               Ok(())
        }
 
        fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
@@ -2042,7 +2041,7 @@ impl ChannelManager {
        }
 
        fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(msgs::RevokeAndACK, Option<msgs::CommitmentSigned>), MsgHandleErrInternal> {
-               let (revoke_and_ack, commitment_signed, chan_monitor) = {
+               let (revoke_and_ack, commitment_signed) = {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        match channel_state.by_id.get_mut(&msg.channel_id) {
                                Some(chan) => {
@@ -2050,20 +2049,20 @@ impl ChannelManager {
                                                //TODO: here and below MsgHandleErrInternal, #153 case
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?
+                                       let (revoke_and_ack, commitment_signed, chan_monitor) = chan.commitment_signed(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                       if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                               unimplemented!();
+                                       }
+                                       (revoke_and_ack, commitment_signed)
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
                        }
                };
-               if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                       unimplemented!();
-               }
-
                Ok((revoke_and_ack, commitment_signed))
        }
 
        fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<Option<msgs::CommitmentUpdate>, MsgHandleErrInternal> {
-               let ((res, mut pending_forwards, mut pending_failures, chan_monitor), short_channel_id) = {
+               let ((res, mut pending_forwards, mut pending_failures), short_channel_id) = {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        match channel_state.by_id.get_mut(&msg.channel_id) {
                                Some(chan) => {
@@ -2071,14 +2070,15 @@ impl ChannelManager {
                                                //TODO: here and below MsgHandleErrInternal, #153 case
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       (chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?, chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
+                                       let (res, pending_forwards, pending_failures, chan_monitor) = chan.revoke_and_ack(&msg).map_err(|e| MsgHandleErrInternal::from_maybe_close(e))?;
+                                       if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+                                               unimplemented!();
+                                       }
+                                       ((res, pending_forwards, pending_failures), chan.get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
                        }
                };
-               if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
-                       unimplemented!();
-               }
                for failure in pending_failures.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
                }
@@ -2168,26 +2168,27 @@ impl ChannelManager {
                Ok(())
        }
 
-       fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), MsgHandleErrInternal> {
-               let (res, chan_monitor) = {
+       fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder), MsgHandleErrInternal> {
+               let res = {
                        let mut channel_state = self.channel_state.lock().unwrap();
                        match channel_state.by_id.get_mut(&msg.channel_id) {
                                Some(chan) => {
                                        if chan.get_their_node_id() != *their_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
                                        }
-                                       let (funding_locked, revoke_and_ack, commitment_update, channel_monitor) = chan.channel_reestablish(msg)
+                                       let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, order) = chan.channel_reestablish(msg)
                                                .map_err(|e| MsgHandleErrInternal::from_chan_maybe_close(e, msg.channel_id))?;
-                                       (Ok((funding_locked, revoke_and_ack, commitment_update)), channel_monitor)
+                                       if let Some(monitor) = channel_monitor {
+                                               if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+                                                       unimplemented!();
+                                               }
+                                       }
+                                       Ok((funding_locked, revoke_and_ack, commitment_update, order))
                                },
                                None => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
                        }
                };
-               if let Some(monitor) = chan_monitor {
-                       if let Err(_e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
-                               unimplemented!();
-                       }
-               }
+
                res
        }
 
@@ -2424,7 +2425,7 @@ impl ChannelMessageHandler for ChannelManager {
                handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg), their_node_id)
        }
 
-       fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<Option<msgs::HTLCFailChannelUpdate>, HandleError> {
+       fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
                handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg), their_node_id)
        }
 
@@ -2448,7 +2449,7 @@ impl ChannelMessageHandler for ChannelManager {
                handle_error!(self, self.internal_announcement_signatures(their_node_id, msg), their_node_id)
        }
 
-       fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>), HandleError> {
+       fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(Option<msgs::FundingLocked>, Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder), HandleError> {
                handle_error!(self, self.internal_channel_reestablish(their_node_id, msg), their_node_id)
        }
 
@@ -3296,8 +3297,9 @@ mod tests {
                        let events = origin_node.node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 1);
                        match events[0] {
-                               Event::PaymentFailed { payment_hash } => {
+                               Event::PaymentFailed { payment_hash, rejected_by_dest } => {
                                        assert_eq!(payment_hash, our_payment_hash);
+                                       assert!(rejected_by_dest);
                                },
                                _ => panic!("Unexpected event"),
                        }
@@ -4937,6 +4939,7 @@ mod tests {
                                assert!(chan_msgs.0.is_none());
                        }
                        if pending_raa.0 {
+                               assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
                                assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
                                check_added_monitors!(node_a, 1);
                        } else {
@@ -4984,6 +4987,7 @@ mod tests {
                                assert!(chan_msgs.0.is_none());
                        }
                        if pending_raa.1 {
+                               assert!(chan_msgs.3 == msgs::RAACommitmentOrder::RevokeAndACKFirst);
                                assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
                                check_added_monitors!(node_b, 1);
                        } else {
@@ -5064,8 +5068,9 @@ mod tests {
                                _ => panic!("Unexpected event"),
                        }
                        match events[1] {
-                               Event::PaymentFailed { payment_hash } => {
+                               Event::PaymentFailed { payment_hash, rejected_by_dest } => {
                                        assert_eq!(payment_hash, payment_hash_5);
+                                       assert!(rejected_by_dest);
                                },
                                _ => panic!("Unexpected event"),
                        }
@@ -5314,6 +5319,141 @@ mod tests {
                claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
        }
 
+       #[test]
+       fn test_drop_messages_peer_disconnect_dual_htlc() {
+               // Test that we can handle reconnecting when both sides of a channel have pending
+               // commitment_updates when we disconnect.
+               let mut nodes = create_network(2);
+               create_announced_chan_between_nodes(&nodes, 0, 1);
+
+               let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+               // Now try to send a second payment which will fail to send
+               let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+               let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+
+               nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
+               check_added_monitors!(nodes[0], 1);
+
+               let events_1 = nodes[0].node.get_and_clear_pending_events();
+               assert_eq!(events_1.len(), 1);
+               match events_1[0] {
+                       Event::UpdateHTLCs { .. } => {},
+                       _ => panic!("Unexpected event"),
+               }
+
+               assert!(nodes[1].node.claim_funds(payment_preimage_1));
+               check_added_monitors!(nodes[1], 1);
+
+               let events_2 = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(events_2.len(), 1);
+               match events_2[0] {
+                       Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+                               assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+                               assert!(update_add_htlcs.is_empty());
+                               assert_eq!(update_fulfill_htlcs.len(), 1);
+                               assert!(update_fail_htlcs.is_empty());
+                               assert!(update_fail_malformed_htlcs.is_empty());
+                               assert!(update_fee.is_none());
+
+                               nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+                               let events_3 = nodes[0].node.get_and_clear_pending_events();
+                               assert_eq!(events_3.len(), 1);
+                               match events_3[0] {
+                                       Event::PaymentSent { ref payment_preimage } => {
+                                               assert_eq!(*payment_preimage, payment_preimage_1);
+                                       },
+                                       _ => panic!("Unexpected event"),
+                               }
+
+                               let (_, commitment_update) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
+                               assert!(commitment_update.is_none());
+                               check_added_monitors!(nodes[0], 1);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
+               nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+               nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+               let reestablish_1 = nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+               assert_eq!(reestablish_1.len(), 1);
+               let reestablish_2 = nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+               assert_eq!(reestablish_2.len(), 1);
+
+               let as_resp = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+               let bs_resp = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+
+               assert!(as_resp.0.is_none());
+               assert!(bs_resp.0.is_none());
+
+               assert!(bs_resp.1.is_none());
+               assert!(bs_resp.2.is_none());
+
+               assert!(as_resp.3 == msgs::RAACommitmentOrder::CommitmentFirst);
+
+               assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
+               assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
+               assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
+               assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+               assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
+               nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap();
+               let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap();
+               assert!(bs_commitment_signed.is_none());
+               check_added_monitors!(nodes[1], 1);
+
+               let bs_second_commitment_signed = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap().unwrap();
+               assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
+               assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
+               assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
+               assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
+               assert!(bs_second_commitment_signed.update_fee.is_none());
+               check_added_monitors!(nodes[1], 1);
+
+               let as_commitment_signed = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().unwrap();
+               assert!(as_commitment_signed.update_add_htlcs.is_empty());
+               assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
+               assert!(as_commitment_signed.update_fail_htlcs.is_empty());
+               assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
+               assert!(as_commitment_signed.update_fee.is_none());
+               check_added_monitors!(nodes[0], 1);
+
+               let (as_revoke_and_ack, as_second_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap();
+               assert!(as_second_commitment_signed.is_none());
+               check_added_monitors!(nodes[0], 1);
+
+               let (bs_second_revoke_and_ack, bs_third_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap();
+               assert!(bs_third_commitment_signed.is_none());
+               check_added_monitors!(nodes[1], 1);
+
+               assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+               check_added_monitors!(nodes[1], 1);
+
+               let events_4 = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(events_4.len(), 1);
+               match events_4[0] {
+                       Event::PendingHTLCsForwardable { .. } => { },
+                       _ => panic!("Unexpected event"),
+               };
+
+               nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+               nodes[1].node.process_pending_htlc_forwards();
+
+               let events_5 = nodes[1].node.get_and_clear_pending_events();
+               assert_eq!(events_5.len(), 1);
+               match events_5[0] {
+                       Event::PaymentReceived { ref payment_hash, amt: _ } => {
+                               assert_eq!(payment_hash_2, *payment_hash);
+                       },
+                       _ => panic!("Unexpected event"),
+               }
+
+               assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap().is_none());
+               check_added_monitors!(nodes[0], 1);
+
+               claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+       }
+
        #[test]
        fn test_invalid_channel_announcement() {
                //Test BOLT 7 channel_announcement msg requirement for final node, gather data to build custom channel_announcement msgs