Merge pull request #349 from ariard/2019-07-data_loss
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Tue, 6 Aug 2019 21:12:57 +0000 (21:12 +0000)
committer GitHub <noreply@github.com>
Tue, 6 Aug 2019 21:12:57 +0000 (21:12 +0000)
Implement option_data_loss_protect on both sides
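
Per BOLT 2, option_data_loss_protect adds an optional data_loss_protect field to
channel_reestablish, carrying your_last_per_commitment_secret (the last
per-commitment secret received from the peer) and my_current_per_commitment_point.
On reconnection, a node which discovers from these fields that it has fallen
behind must not broadcast its stale local commitment transaction; instead it
stores the peer's current per-commitment point so its ChannelMonitor can claim
the to_remote output once the peer force-closes. Conversely, a node which
detects that its peer has fallen behind force-closes the channel.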

src/ln/channel.rs
src/ln/functional_tests.rs
src/ln/peer_handler.rs

diff --combined src/ln/channel.rs
index 9dfa4c459714e600f7a9387f7acc91c28b36b70c,5c0ca8fd6609cfe07204ffed6d3e6c9d7a11268b..f523471210abaf4650700d42814f75d8c75112f4
@@@ -16,7 -16,7 +16,7 @@@ use secp256k1::{Secp256k1,Signature}
  use secp256k1;
  
  use ln::msgs;
- use ln::msgs::{DecodeError, OptionalField, LocalFeatures};
+ use ln::msgs::{DecodeError, OptionalField, LocalFeatures, DataLossProtect};
  use ln::channelmonitor::ChannelMonitor;
  use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingForwardHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
  use ln::chan_utils::{TxCreationKeys,HTLCOutputInCommitment,HTLC_SUCCESS_TX_WEIGHT,HTLC_TIMEOUT_TX_WEIGHT};
@@@ -32,7 -32,7 +32,7 @@@ use util::config::{UserConfig,ChannelCo
  
  use std;
  use std::default::Default;
- use std::{cmp,mem};
+ use std::{cmp,mem,fmt};
  use std::sync::{Arc};
  
  #[cfg(test)]
@@@ -366,10 -366,23 +366,23 @@@ pub const OFFERED_HTLC_SCRIPT_WEIGHT: u
  /// Used to return a simple Error back to ChannelManager. Will get converted to a
  /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
  /// channel_id in ChannelManager.
- #[derive(Debug)]
  pub(super) enum ChannelError {
        Ignore(&'static str),
        Close(&'static str),
+       CloseDelayBroadcast {
+               msg: &'static str,
+               update: Option<ChannelMonitor>
+       },
+ }
+ impl fmt::Debug for ChannelError {
+       fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+               match self {
+                       &ChannelError::Ignore(e) => write!(f, "Ignore: {}", e),
+                       &ChannelError::Close(e) => write!(f, "Close: {}", e),
+                       &ChannelError::CloseDelayBroadcast { msg, .. } => write!(f, "CloseDelayBroadcast: {}", msg)
+               }
+       }
  }
  
  macro_rules! secp_check {
@@@ -1661,9 -1674,10 +1674,9 @@@ impl Channel 
                if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
                        return Err(ChannelError::Close("Remote tried to push more than our max accepted HTLCs"));
                }
 -              //TODO: Spec is unclear if this is per-direction or in total (I assume per direction):
                // Check our_max_htlc_value_in_flight_msat
                if htlc_inbound_value_msat + msg.amount_msat > Channel::get_our_max_htlc_value_in_flight_msat(self.channel_value_satoshis) {
 -                      return Err(ChannelError::Close("Remote HTLC add would put them over their max HTLC value in flight"));
 +                      return Err(ChannelError::Close("Remote HTLC add would put them over our max HTLC value"));
                }
                // Check our_channel_reserve_satoshis (we're getting paid, so they have to at least meet
                // the reserve_satoshis we told them to always have as direct payment so that they lose
                        return Err(ChannelError::Close("Peer sent a garbage channel_reestablish"));
                }
  
+               if msg.next_remote_commitment_number > 0 {
+                       match msg.data_loss_protect {
+                               OptionalField::Present(ref data_loss) => {
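+                                       // They claim to have received our per-commitment secret for the given height;
+                                       // rebuild it from our commitment seed and verify it matches what we actually sent.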
+                                       if chan_utils::build_commitment_secret(self.local_keys.commitment_seed, INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1) != data_loss.your_last_per_commitment_secret {
+                                               return Err(ChannelError::Close("Peer sent a garbage channel_reestablish: per-commitment secret does not match the commitment height provided"));
+                                       }
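+                                       // The secret checks out, but they are ahead of our current state: we have lost
+                                       // data. Stash their my_current_per_commitment_point so our ChannelMonitor can
+                                       // claim the to_remote output if they force-close, and refuse to broadcast.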
+                                       if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_local_commitment_transaction_number {
+                                               self.channel_monitor.provide_rescue_remote_commitment_tx_info(data_loss.my_current_per_commitment_point);
+                                               return Err(ChannelError::CloseDelayBroadcast {
+                                                       msg: "We have fallen behind - we have received proof that if we broadcast, our counterparty is going to claim our funds - we cannot do any automated broadcasting",
+                                                       update: Some(self.channel_monitor.clone())
+                                               });
+                                       }
+                               },
+                               OptionalField::Absent => {}
+                       }
+               }
                // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
                // remaining cases either succeed or ErrorMessage-fail).
                self.channel_state &= !(ChannelState::PeerDisconnected as u32);
                                // now!
                                match self.free_holding_cell_htlcs() {
                                        Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
-                                       Err(ChannelError::Ignore(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
+                                       Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast { .. }) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
                                        Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
                                        Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
                                }
        pub fn get_channel_reestablish(&self) -> msgs::ChannelReestablish {
                assert_eq!(self.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
                assert_ne!(self.cur_remote_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
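+               // Fill in data_loss_protect: your_last_per_commitment_secret is the last
+               // per-commitment secret the peer revoked to us; per BOLT 2 we send all-zeroes
+               // if they have not revoked any commitment yet.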
+               let data_loss_protect = if self.cur_remote_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
+                       let remote_last_secret = self.channel_monitor.get_secret(self.cur_remote_commitment_transaction_number + 2).unwrap();
+                       log_trace!(self, "Enough info to generate a Data Loss Protect with per_commitment_secret {}", log_bytes!(remote_last_secret));
+                       OptionalField::Present(DataLossProtect {
+                               your_last_per_commitment_secret: remote_last_secret,
+                               my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number + 1))
+                       })
+               } else {
+                       log_debug!(self, "We haven't yet seen any revoked per-commitment secret; if this channel has already been updated it means we have fallen behind and should wait for the other peer to close");
+                       OptionalField::Present(DataLossProtect {
+                               your_last_per_commitment_secret: [0;32],
+                               my_current_per_commitment_point: PublicKey::from_secret_key(&self.secp_ctx, &self.build_local_commitment_secret(self.cur_local_commitment_transaction_number))
+                       })
+               };
                msgs::ChannelReestablish {
                        channel_id: self.channel_id(),
                        // The protocol has two different commitment number concepts - the "commitment
                        // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
                        // overflow here.
                        next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.cur_remote_commitment_transaction_number - 1,
-                       data_loss_protect: OptionalField::Absent,
+                       data_loss_protect,
                }
        }
  
                if outbound_htlc_count + 1 > self.their_max_accepted_htlcs as u32 {
                        return Err(ChannelError::Ignore("Cannot push more than their max accepted HTLCs"));
                }
 -              //TODO: Spec is unclear if this is per-direction or in total (I assume per direction):
                // Check their_max_htlc_value_in_flight_msat
                if htlc_outbound_value_msat + amount_msat > self.their_max_htlc_value_in_flight_msat {
 -                      return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight"));
 +                      return Err(ChannelError::Ignore("Cannot send value that would put us over the max HTLC value in flight our peer will accept"));
                }
  
                // Check self.their_channel_reserve_satoshis (the amount we must keep as
                // reserve for them to have something to claim if we misbehave)
                if self.value_to_self_msat < self.their_channel_reserve_satoshis * 1000 + amount_msat + htlc_outbound_value_msat {
 -                      return Err(ChannelError::Ignore("Cannot send value that would put us over the reserve value"));
 +                      return Err(ChannelError::Ignore("Cannot send value that would put us over their reserve value"));
                }
  
                //TODO: Check cltv_expiry? Do this in channel manager?
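
The new CloseDelayBroadcast variant changes the caller's contract: on Close the
caller force-closes and broadcasts its latest local commitment transaction, but
on CloseDelayBroadcast it must persist the rescued monitor and broadcast
nothing. A minimal sketch of the expected caller-side branching, with
hypothetical helper names (persist_monitor, force_close) that are not part of
this patch:

	// Hypothetical caller-side handling of the new variant (illustrative only).
	match channel.channel_reestablish(&msg) {
		Ok(res) => { /* resume the channel as usual */ },
		Err(ChannelError::Ignore(e)) => { /* log and keep the channel open */ },
		Err(ChannelError::Close(e)) => force_close(e), // broadcast latest local commitment
		Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
			// We fell behind: persist the monitor (now holding the peer's
			// my_current_per_commitment_point) so we can claim to_remote later,
			// but do NOT broadcast our stale local commitment transaction.
			if let Some(monitor) = update {
				persist_monitor(monitor);
			}
			log_error!(self, "{}", msg);
		},
	}
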
index cb59aed83351287d2f5bd04fd8125710932cd4d5,9debe08fa28889f74dc97379b1412a27eca32eb2..5446c80aad821d8960703f58ca4fd902cd4ff345
@@@ -3,8 -3,9 +3,9 @@@
  //! claim outputs on-chain.
  
  use chain::transaction::OutPoint;
- use chain::chaininterface::{ChainListener, ChainWatchInterface};
+ use chain::chaininterface::{ChainListener, ChainWatchInterface, ChainWatchInterfaceUtil};
  use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor, KeysManager};
+ use chain::keysinterface;
  use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
  use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT};
  use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
@@@ -18,6 -19,7 +19,7 @@@ use util::events::{Event, EventsProvide
  use util::errors::APIError;
  use util::ser::{Writeable, ReadableArgs};
  use util::config::UserConfig;
+ use util::logger::Logger;
  
  use bitcoin::util::hash::BitcoinHash;
  use bitcoin_hashes::sha256d::Hash as Sha256dHash;
@@@ -39,7 -41,7 +41,7 @@@ use secp256k1::key::{PublicKey,SecretKe
  
  use std::collections::{BTreeSet, HashMap, HashSet};
  use std::default::Default;
- use std::sync::Arc;
+ use std::sync::{Arc, Mutex};
  use std::sync::atomic::Ordering;
  use std::mem;
  
@@@ -1234,7 -1236,7 +1236,7 @@@ fn do_channel_reserve_test(test_recv: b
                assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
                let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
                match err {
 -                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight"),
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
                        _ => panic!("Unknown error variants"),
                }
        }
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
                let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
                match err {
 -                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
                match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
 -                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
        {
                let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
                match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
 -                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the reserve value"),
 +                      APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
                        _ => panic!("Unknown error variants"),
                }
        }
@@@ -5017,7 -5019,7 +5019,7 @@@ fn test_update_add_htlc_bolt2_sender_ex
        let err = nodes[0].node.send_payment(route, our_payment_hash);
  
        if let Err(APIError::ChannelUnavailable{err}) = err {
 -              assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight");
 +              assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept");
        } else {
                assert!(false);
        }
@@@ -5141,7 -5143,7 +5143,7 @@@ fn test_update_add_htlc_bolt2_receiver_
        let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
  
        if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
 -              assert_eq!(err,"Remote HTLC add would put them over their max HTLC value in flight");
 +              assert_eq!(err,"Remote HTLC add would put them over our max HTLC value");
        } else {
                assert!(false);
        }
@@@ -5945,3 -5947,113 +5947,113 @@@ fn test_user_configurable_csv_delay() 
                }
        } else { assert!(false); }
  }
+ #[test]
+ fn test_data_loss_protect() {
+       // We want to be sure that:
+       // * we don't broadcast our local commitment tx if we have fallen behind
+       // * we close the channel if we detect that the other side has fallen behind
+       // * we are able to claim our own output thanks to the remote my_current_per_commitment_point
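+       // Scenario: snapshot A's state, make two payments, then restart A from the stale snapshot.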
+       let mut nodes = create_network(2, &[None, None]);
+       let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
+       // Cache node A state before any channel update
+       let previous_node_state = nodes[0].node.encode();
+       let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
+       nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
+       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+       nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+       nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+       // Restore node A from previous state
+       let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", 0)));
+       let chan_monitor = <(Sha256dHash, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0), Arc::clone(&logger)).unwrap().1;
+       let chain_monitor = Arc::new(ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
+       let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
+       let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
+       let monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone()));
+       let mut channel_monitors = HashMap::new();
+       channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &chan_monitor);
+       let node_state_0 = <(Sha256dHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+               keys_manager: Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::clone(&logger), 42, 21)),
+               fee_estimator: feeest.clone(),
+               monitor: monitor.clone(),
+               chain_monitor: chain_monitor.clone(),
+               logger: Arc::clone(&logger),
+               tx_broadcaster,
+               default_config: UserConfig::new(),
+               channel_monitors: &channel_monitors
+       }).unwrap().1;
+       nodes[0].node = Arc::new(node_state_0);
+       assert!(monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok());
+       nodes[0].chan_monitor = monitor;
+       nodes[0].chain_monitor = chain_monitor;
+       check_added_monitors!(nodes[0], 1);
+       nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+       nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+       let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+       // Check that we update the monitor upon learning B's per_commitment_point
+       if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]) {
+               if let Some(error) = err.action {
+                       match error {
+                               ErrorAction::SendErrorMessage { msg } => {
+                                       assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast, our counterparty is going to claim our funds - we cannot do any automated broadcasting");
+                               },
+                               _ => panic!("Unexpected event!"),
+                       }
+               } else { assert!(false); }
+       } else { assert!(false); }
+       check_added_monitors!(nodes[0], 1);
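+       // A now knows it is behind and must not broadcast its stale local commitment transaction.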
+       {
+               let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+               assert_eq!(node_txn.len(), 0);
+       }
+       let mut reestablish_1 = Vec::with_capacity(1);
+       for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+               if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
+                       assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+                       reestablish_1.push(msg.clone());
+               } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+               } else {
+                       panic!("Unexpected event")
+               }
+       }
+       // Check that B closes the channel upon detecting that A has fallen behind
+       if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) {
+               if let Some(error) = err.action {
+                       match error {
+                               ErrorAction::SendErrorMessage { msg } => {
+                                       assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction");
+                               },
+                               _ => panic!("Unexpected event!"),
+                       }
+               } else { assert!(false); }
+       } else { assert!(false); }
+       let events = nodes[1].node.get_and_clear_pending_msg_events();
+       assert_eq!(events.len(), 1);
+       match events[0] {
+               MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+               _ => panic!("Unexpected event"),
+       }
+       // B broadcasts its latest local commitment tx; check that A can then claim its to_remote output
+       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+       assert_eq!(node_txn.len(), 1);
+       check_spends!(node_txn[0], chan.3.clone());
+       assert_eq!(node_txn[0].output.len(), 2);
+       let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+       nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()]}, 1);
+       let spend_txn = check_spendable_outputs!(nodes[0], 1);
+       assert_eq!(spend_txn.len(), 1);
+       check_spends!(spend_txn[0], node_txn[0].clone());
+ }
diff --combined src/ln/peer_handler.rs
index a13705f4aeeb9ff8fa3182ee707a196253142a44,0e390bd32fc9d777dbd7dddbc45e071683c69514..8094f256195d2fa5e1dd5c20c1f44ef3c3489570
@@@ -45,13 -45,12 +45,13 @@@ pub struct MessageHandler 
  /// careful to ensure you don't have races whereby you might register a new connection with an fd
  /// the same as a yet-to-be-disconnect_event()-ed.
  pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
 -      /// Attempts to send some data from the given Vec starting at the given offset to the peer.
 +      /// Attempts to send some data from the given slice to the peer.
 +      ///
        /// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected.
        /// Note that in the disconnected case, a disconnect_event must still fire and further write
        /// attempts may occur until that time.
        ///
 -      /// If the returned size is smaller than data.len() - write_offset, a write_available event must
 +      /// If the returned size is smaller than data.len(), a write_available event must
        /// trigger the next time more data can be written. Additionally, until a send_data call
        /// completes fully, no further read_events should trigger on the same peer!
        ///
@@@ -59,7 -58,7 +59,7 @@@
        /// events should be paused to prevent DoS in the send buffer), resume_read may be set
        /// indicating that read events on this descriptor should resume. A resume_read of false does
        /// *not* imply that further read events should be paused.
 -      fn send_data(&mut self, data: &Vec<u8>, write_offset: usize, resume_read: bool) -> usize;
 +      fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize;
        /// Disconnect the socket pointed to by this SocketDescriptor. Once this function returns, no
        /// more calls to write_event, read_event or disconnect_event may be made with this descriptor.
        /// No disconnect_event should be generated as a result of this call, though obviously races
@@@ -388,8 -387,7 +388,8 @@@ impl<Descriptor: SocketDescriptor> Peer
                                };
  
                                let should_be_reading = peer.pending_outbound_buffer.len() < MSG_BUFF_SIZE;
 -                              let data_sent = descriptor.send_data(next_buff, peer.pending_outbound_buffer_first_msg_offset, should_be_reading);
 +                              let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
 +                              let data_sent = descriptor.send_data(pending, should_be_reading);
                                peer.pending_outbound_buffer_first_msg_offset += data_sent;
                                peer.pending_outbound_buffer_first_msg_offset == next_buff.len()
                        } {
                                                                                                        log_info!(self, "Peer local features required unknown version bits");
                                                                                                        return Err(PeerHandleError{ no_connection_possible: true });
                                                                                                }
-                                                                                               if msg.local_features.requires_data_loss_protect() {
-                                                                                                       log_info!(self, "Peer local features required data_loss_protect");
-                                                                                                       return Err(PeerHandleError{ no_connection_possible: true });
-                                                                                               }
                                                                                                if peer.their_global_features.is_some() {
                                                                                                        return Err(PeerHandleError{ no_connection_possible: false });
                                                                                                }
@@@ -1124,8 -1118,9 +1120,8 @@@ mod tests 
        }
  
        impl SocketDescriptor for FileDescriptor {
 -              fn send_data(&mut self, data: &Vec<u8>, write_offset: usize, _resume_read: bool) -> usize {
 -                      assert!(write_offset < data.len());
 -                      data.len() - write_offset
 +              fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
 +                      data.len()
                }
  
                fn disconnect_socket(&mut self) {}
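
With the slice-based send_data, a production descriptor no longer tracks a
write offset itself. A rough sketch over std::net (illustrative only, not part
of this patch; a real handler would use nonblocking I/O and fire
write_available when the socket becomes writable again):

	use std::io::Write;
	use std::net::{Shutdown, TcpStream};
	use std::sync::Arc;

	#[derive(Clone)]
	struct TcpDescriptor {
		stream: Arc<TcpStream>,
		id: u64, // unique per connection, used as our identity
	}
	impl PartialEq for TcpDescriptor { fn eq(&self, other: &Self) -> bool { self.id == other.id } }
	impl Eq for TcpDescriptor {}
	impl std::hash::Hash for TcpDescriptor {
		fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) { self.id.hash(hasher) }
	}

	impl SocketDescriptor for TcpDescriptor {
		fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
			// The caller tracks the offset now; we just report how many bytes
			// the kernel accepted on this call.
			match (&*self.stream).write(data) {
				Ok(n) => n,
				Err(_) => 0, // wrote nothing; write_available should fire later
			}
		}
		fn disconnect_socket(&mut self) {
			let _ = self.stream.shutdown(Shutdown::Both);
		}
	}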