Merge pull request #320 from TheBlueMatt/2019-03-chan-send-rewrite
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Mon, 22 Apr 2019 21:32:24 +0000 (17:32 -0400)
committer GitHub <noreply@github.com>
Mon, 22 Apr 2019 21:32:24 +0000 (17:32 -0400)
Rewrite Channel resend tracking to make it much more reliable

fuzz/fuzz_targets/chanmon_fail_consistency.rs
src/ln/channel.rs

index b12ebea60f5d43806e46f7d2cafb4db2cc6e43a0,0d966d5d777529881a8ec8505f474515f8a46a3d..5a8790903438fbd5d59926998fb98e9eba2967ee
@@@ -49,6 -49,8 +49,8 @@@ use utils::test_logger
  use secp256k1::key::{PublicKey,SecretKey};
  use secp256k1::Secp256k1;
  
+ use std::cmp::Ordering;
+ use std::collections::HashSet;
  use std::sync::{Arc,Mutex};
  use std::io::Cursor;
  
@@@ -69,9 -71,9 +71,9 @@@ pub struct TestChannelMonitor 
        pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
  }
  impl TestChannelMonitor {
 -      pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>) -> Self {
 +      pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>, feeest: Arc<chaininterface::FeeEstimator>) -> Self {
                Self {
 -                      simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger),
 +                      simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest),
                        update_ret: Mutex::new(Ok(())),
                }
        }
@@@ -142,13 -144,13 +144,13 @@@ pub fn do_test(data: &[u8]) 
                ($node_id: expr) => { {
                        let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
                        let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
 -                      let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone()));
 +                      let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
  
                        let keys_manager = Arc::new(KeyProvider { node_id: $node_id });
                        let mut config = UserConfig::new();
                        config.channel_options.fee_proportional_millionths = 0;
                        config.channel_options.announced_channel = true;
 -                      config.channel_limits.min_dust_limit_satoshis = 0;
 +                      config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
                        (ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), watch.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config).unwrap(),
                        monitor)
                } }
  
                macro_rules! process_events {
                        ($node: expr, $fail: expr) => { {
-                               for event in nodes[$node].get_and_clear_pending_events() {
+                               // In case we get 256 payments we may have a hash collision, resulting in the
+                               // second claim/fail call not finding the duplicate-hash HTLC, so we have to
+                               // deduplicate the calls here.
+                               let mut claim_set = HashSet::new();
+                               let mut events = nodes[$node].get_and_clear_pending_events();
+                               // Sort events so that PendingHTLCsForwardable get processed last. This avoids a
+                               // case where we first process a PendingHTLCsForwardable, then claim/fail on a
+                               // PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
+                               // PaymentReceived event for the second HTLC in our pending_events (and breaking
+                               // our claim_set deduplication).
+                               events.sort_by(|a, b| {
+                                       if let events::Event::PaymentReceived { .. } = a {
+                                               if let events::Event::PendingHTLCsForwardable { .. } = b {
+                                                       Ordering::Less
+                                               } else { Ordering::Equal }
+                                       } else if let events::Event::PendingHTLCsForwardable { .. } = a {
+                                               if let events::Event::PaymentReceived { .. } = b {
+                                                       Ordering::Greater
+                                               } else { Ordering::Equal }
+                                       } else { Ordering::Equal }
+                               });
+                               for event in events.drain(..) {
                                        match event {
                                                events::Event::PaymentReceived { payment_hash, .. } => {
-                                                       if $fail {
-                                                               assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
-                                                       } else {
-                                                               assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
+                                                       if claim_set.insert(payment_hash.0) {
+                                                               if $fail {
+                                                                       assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
+                                                               } else {
+                                                                       assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
+                                                               }
                                                        }
                                                },
                                                events::Event::PaymentSent { .. } => {},
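The sort above relies on two properties: sort_by is stable, so events the comparator reports as Equal keep their relative order, and only PaymentReceived/PendingHTLCsForwardable pairs are ever reordered. A stand-alone sketch of the same ordering goal, using a stand-in Event enum (the real events::Event variants carry more payload) and a total-order sort key in place of the pairwise comparator:

```rust
// Stand-in for events::Event; only the variants relevant to the ordering
// are modeled, with an illustrative payload field.
#[derive(Debug, PartialEq)]
enum Event {
    PaymentReceived { amt: u64 },
    PaymentSent,
    PendingHTLCsForwardable,
}

fn main() {
    let mut events = vec![
        Event::PendingHTLCsForwardable,
        Event::PaymentSent,
        Event::PaymentReceived { amt: 42 },
    ];
    // sort_by_key is stable: all events keep their relative order except
    // that PendingHTLCsForwardable is pushed behind everything else, so a
    // forwardable event never runs before the claim/fail of a
    // PaymentReceived in the same batch.
    events.sort_by_key(|e| match e {
        Event::PendingHTLCsForwardable => 1u8,
        _ => 0u8,
    });
    assert!(matches!(events.last(), Some(Event::PendingHTLCsForwardable)));
    println!("{:?}", events);
}
```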
diff --combined src/ln/channel.rs
index 17d1d5aa042301ebf16b7a0190089ca406ca4cf0,e73abf92c99e000c86a4cef8424f3d1d130ad03c..7b32e16e1dd5fbc7dada2aad62acfe6ccd1cb1b5
@@@ -237,19 -237,21 +237,21 @@@ pub(super) struct Channel 
        cur_local_commitment_transaction_number: u64,
        cur_remote_commitment_transaction_number: u64,
        value_to_self_msat: u64, // Excluding all pending_htlcs, excluding fees
-       /// Upon receipt of a channel_reestablish we have to figure out whether to send a
-       /// revoke_and_ack first or a commitment update first. Generally, we prefer to send
-       /// revoke_and_ack first, but if we had a pending commitment update of our own waiting on a
-       /// remote revoke when we received the latest commitment update from the remote we have to make
-       /// sure that commitment update gets resent first.
-       received_commitment_while_awaiting_raa: bool,
        pending_inbound_htlcs: Vec<InboundHTLCOutput>,
        pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
        holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
  
+       /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
+       /// need to ensure we resend them in the order we originally generated them. Note that because
+       /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
+       /// sufficient to simply set this to the opposite of any message we are generating as we
+       /// generate it. I.e., when we generate a CS, we set this to RAAFirst as, if there is a pending
+       /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
+       /// send it first.
+       resend_order: RAACommitmentOrder,
        monitor_pending_revoke_and_ack: bool,
        monitor_pending_commitment_signed: bool,
-       monitor_pending_order: Option<RAACommitmentOrder>,
        monitor_pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>,
        monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
  
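To make the invariant in the comment above concrete: there is at most one in-flight commitment_signed and one in-flight revoke_and_ack at any time, so whichever message we generate now, any still-in-flight message of the other type predates it and must be resent first. A stand-alone sketch under stand-in names (ChannelSketch and the generated_* methods are illustrative; in the real code the assignments happen inline in the commitment_signed path and in send_commitment_no_status_check):

```rust
// Stand-in enum mirroring the two-variant RAACommitmentOrder used in
// this patch.
#[derive(Clone, Debug, PartialEq)]
enum RAACommitmentOrder {
    CommitmentFirst,
    RevokeAndACKFirst,
}

struct ChannelSketch {
    resend_order: RAACommitmentOrder,
}

impl ChannelSketch {
    // We just generated a commitment_signed: any in-flight RAA of ours
    // was generated earlier, so on resend the RAA must go first.
    fn generated_commitment_signed(&mut self) {
        self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
    }
    // We just generated a revoke_and_ack: any in-flight CS of ours was
    // generated earlier, so on resend the CS must go first.
    fn generated_revoke_and_ack(&mut self) {
        self.resend_order = RAACommitmentOrder::CommitmentFirst;
    }
}

fn main() {
    let mut chan = ChannelSketch { resend_order: RAACommitmentOrder::CommitmentFirst };
    chan.generated_commitment_signed();
    assert_eq!(chan.resend_order, RAACommitmentOrder::RevokeAndACKFirst);
    chan.generated_revoke_and_ack();
    assert_eq!(chan.resend_order, RAACommitmentOrder::CommitmentFirst);
}
```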
@@@ -410,6 -412,13 +412,6 @@@ impl Channel 
                1000 // TODO
        }
  
 -      fn derive_minimum_depth(_channel_value_satoshis_msat: u64, _value_to_self_msat: u64) -> u32 {
 -              // Note that in order to comply with BOLT 7 announcement_signatures requirements this must
 -              // be at least 6.
 -              const CONF_TARGET: u32 = 12; //TODO: Should be much higher
 -              CONF_TARGET
 -      }
 -
        // Constructors:
        pub fn new_outbound(fee_estimator: &FeeEstimator, keys_provider: &Arc<KeysInterface>, their_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, logger: Arc<Logger>, config: &UserConfig) -> Result<Channel, APIError> {
                let chan_keys = keys_provider.get_channel_keys(false);
                        cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat: channel_value_satoshis * 1000 - push_msat,
-                       received_commitment_while_awaiting_raa: false,
  
                        pending_inbound_htlcs: Vec::new(),
                        pending_outbound_htlcs: Vec::new(),
                        next_remote_htlc_id: 0,
                        channel_update_count: 1,
  
+                       resend_order: RAACommitmentOrder::CommitmentFirst,
                        monitor_pending_revoke_and_ack: false,
                        monitor_pending_commitment_signed: false,
-                       monitor_pending_order: None,
                        monitor_pending_forwards: Vec::new(),
                        monitor_pending_failures: Vec::new(),
  
                }
  
                // Now check against optional parameters as set by config...
 -              if msg.funding_satoshis < config.channel_limits.min_funding_satoshis {
 +              if msg.funding_satoshis < config.peer_channel_config_limits.min_funding_satoshis {
                        return Err(ChannelError::Close("funding satoshis is less than the user specified limit"));
                }
 -              if msg.htlc_minimum_msat > config.channel_limits.max_htlc_minimum_msat {
 +              if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
                        return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit"));
                }
 -              if msg.max_htlc_value_in_flight_msat < config.channel_limits.min_max_htlc_value_in_flight_msat {
 +              if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
                        return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit"));
                }
 -              if msg.channel_reserve_satoshis > config.channel_limits.max_channel_reserve_satoshis {
 +              if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
                        return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit"));
                }
 -              if msg.max_accepted_htlcs < config.channel_limits.min_max_accepted_htlcs {
 +              if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
                        return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit"));
                }
 -              if msg.dust_limit_satoshis < config.channel_limits.min_dust_limit_satoshis {
 +              if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis {
                        return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit"));
                }
 -              if msg.dust_limit_satoshis > config.channel_limits.max_dust_limit_satoshis {
 +              if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis {
                        return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit"));
                }
  
                // Convert things into internal flags and prep our state:
  
                let their_announce = if (msg.channel_flags & 1) == 1 { true } else { false };
 -              if config.channel_limits.force_announced_channel_preference {
 +              if config.peer_channel_config_limits.force_announced_channel_preference {
                        if local_config.announced_channel != their_announce {
                                return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours"));
                        }
                        cur_local_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        cur_remote_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
                        value_to_self_msat: msg.push_msat,
-                       received_commitment_while_awaiting_raa: false,
  
                        pending_inbound_htlcs: Vec::new(),
                        pending_outbound_htlcs: Vec::new(),
                        next_remote_htlc_id: 0,
                        channel_update_count: 1,
  
+                       resend_order: RAACommitmentOrder::CommitmentFirst,
                        monitor_pending_revoke_and_ack: false,
                        monitor_pending_commitment_signed: false,
-                       monitor_pending_order: None,
                        monitor_pending_forwards: Vec::new(),
                        monitor_pending_failures: Vec::new(),
  
                        our_htlc_minimum_msat: Channel::derive_our_htlc_minimum_msat(msg.feerate_per_kw as u64),
                        their_to_self_delay: msg.to_self_delay,
                        their_max_accepted_htlcs: msg.max_accepted_htlcs,
 -                      minimum_depth: Channel::derive_minimum_depth(msg.funding_satoshis*1000, msg.push_msat),
 +                      minimum_depth: config.own_channel_config.minimum_depth,
  
                        their_funding_pubkey: Some(msg.funding_pubkey),
                        their_revocation_basepoint: Some(msg.revocation_basepoint),
                }
  
                // Now check against optional parameters as set by config...
 -              if msg.htlc_minimum_msat > config.channel_limits.max_htlc_minimum_msat {
 +              if msg.htlc_minimum_msat > config.peer_channel_config_limits.max_htlc_minimum_msat {
                        return Err(ChannelError::Close("htlc minimum msat is higher than the user specified limit"));
                }
 -              if msg.max_htlc_value_in_flight_msat < config.channel_limits.min_max_htlc_value_in_flight_msat {
 +              if msg.max_htlc_value_in_flight_msat < config.peer_channel_config_limits.min_max_htlc_value_in_flight_msat {
                        return Err(ChannelError::Close("max htlc value in flight msat is less than the user specified limit"));
                }
 -              if msg.channel_reserve_satoshis > config.channel_limits.max_channel_reserve_satoshis {
 +              if msg.channel_reserve_satoshis > config.peer_channel_config_limits.max_channel_reserve_satoshis {
                        return Err(ChannelError::Close("channel reserve satoshis is higher than the user specified limit"));
                }
 -              if msg.max_accepted_htlcs < config.channel_limits.min_max_accepted_htlcs {
 +              if msg.max_accepted_htlcs < config.peer_channel_config_limits.min_max_accepted_htlcs {
                        return Err(ChannelError::Close("max accepted htlcs is less than the user specified limit"));
                }
 -              if msg.dust_limit_satoshis < config.channel_limits.min_dust_limit_satoshis {
 +              if msg.dust_limit_satoshis < config.peer_channel_config_limits.min_dust_limit_satoshis {
                        return Err(ChannelError::Close("dust limit satoshis is less than the user specified limit"));
                }
 -              if msg.dust_limit_satoshis > config.channel_limits.max_dust_limit_satoshis {
 +              if msg.dust_limit_satoshis > config.peer_channel_config_limits.max_dust_limit_satoshis {
                        return Err(ChannelError::Close("dust limit satoshis is greater than the user specified limit"));
                }
 -              if msg.minimum_depth > config.channel_limits.max_minimum_depth {
 +              if msg.minimum_depth > config.peer_channel_config_limits.max_minimum_depth {
                        return Err(ChannelError::Close("We consider the minimum depth to be unreasonably large"));
                }
  
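From a caller's perspective, the rename splits UserConfig into limits we enforce on the peer's proposed parameters (peer_channel_config_limits) and our own handshake knobs (own_channel_config, whose minimum_depth replaces the removed derive_minimum_depth helper). A usage sketch, assuming the crate's util::config::UserConfig layout at this revision; the field values are illustrative:

```rust
// Assumes the 2019-era lightning crate with util::config::UserConfig.
use lightning::util::config::UserConfig;

fn main() {
    let mut config = UserConfig::new();
    // Limits applied to parameters the *peer* proposes in open/accept:
    config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
    config.peer_channel_config_limits.max_minimum_depth = 144;
    // Our own handshake parameters, e.g. the confirmation depth that
    // replaces the removed derive_minimum_depth() helper:
    config.own_channel_config.minimum_depth = 6;
}
```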
                        }
                }
  
-               if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
-                       // This is a response to our post-monitor-failed unfreeze messages, so we can clear the
-                       // monitor_pending_order requirement as we won't re-send the monitor_pending messages.
-                       self.monitor_pending_order = None;
-               }
                self.channel_monitor.provide_latest_local_commitment_tx_info(local_commitment_tx.0, local_keys, self.feerate_per_kw, htlcs_and_sigs);
  
                for htlc in self.pending_inbound_htlcs.iter_mut() {
  
                self.cur_local_commitment_transaction_number -= 1;
                self.last_local_commitment_txn = new_local_commitment_txn;
-               self.received_commitment_while_awaiting_raa = (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) != 0;
+               // Note that if we need_our_commitment & !AwaitingRemoteRevoke we'll call
+               // send_commitment_no_status_check() next which will reset this to RAAFirst.
+               self.resend_order = RAACommitmentOrder::CommitmentFirst;
  
                if (self.channel_state & ChannelState::MonitorUpdateFailed as u32) != 0 {
                        // In case we initially failed monitor updating without requiring a response, we need
                        // to make sure the RAA gets sent first.
-                       if !self.monitor_pending_commitment_signed {
-                               self.monitor_pending_order = Some(RAACommitmentOrder::RevokeAndACKFirst);
-                       }
                        self.monitor_pending_revoke_and_ack = true;
                        if need_our_commitment && (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
                                // If we were going to send a commitment_signed after the RAA, go ahead and do all
                self.their_prev_commitment_point = self.their_cur_commitment_point;
                self.their_cur_commitment_point = Some(msg.next_per_commitment_point);
                self.cur_remote_commitment_transaction_number -= 1;
-               self.received_commitment_while_awaiting_raa = false;
-               if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
-                       // This is a response to our post-monitor-failed unfreeze messages, so we can clear the
-                       // monitor_pending_order requirement as we won't re-send the monitor_pending messages.
-                       self.monitor_pending_order = None;
-               }
  
                log_trace!(self, "Updating HTLCs on receipt of RAA...");
                let mut to_forward_infos = Vec::new();
                                // When the monitor updating is restored we'll call get_last_commitment_update(),
                                // which does not update state, but we're definitely now awaiting a remote revoke
                                // before we can step forward any more, so set it here.
-                               self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
+                               self.send_commitment_no_status_check()?;
                        }
                        self.monitor_pending_forwards.append(&mut to_forward_infos);
                        self.monitor_pending_failures.append(&mut revoked_htlcs);
        /// Indicates that a ChannelMonitor update failed to be stored by the client and further
        /// updates are partially paused.
        /// This must be called immediately after the call which generated the ChannelMonitor update
-       /// which failed, with the order argument set to the type of call it represented (ie a
-       /// commitment update or a revoke_and_ack generation). The messages which were generated from
-       /// that original call must *not* have been sent to the remote end, and must instead have been
-       /// dropped. They will be regenerated when monitor_updating_restored is called.
-       pub fn monitor_update_failed(&mut self, order: RAACommitmentOrder, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
+       /// which failed. The messages generated from the call which produced the monitor
+       /// update failure must *not* have been sent to the remote end, and must instead
+       /// have been dropped. They will be regenerated when monitor_updating_restored is called.
+       pub fn monitor_update_failed(&mut self, resend_raa: bool, resend_commitment: bool, mut pending_forwards: Vec<(PendingForwardHTLCInfo, u64)>, mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>) {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, 0);
                self.monitor_pending_revoke_and_ack = resend_raa;
                self.monitor_pending_commitment_signed = resend_commitment;
-               self.monitor_pending_order = Some(order);
                assert!(self.monitor_pending_forwards.is_empty());
                mem::swap(&mut pending_forwards, &mut self.monitor_pending_forwards);
                assert!(self.monitor_pending_failures.is_empty());
                mem::swap(&mut failures, &mut self.monitor_pending_failures);
  
                if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
-                       // Leave monitor_pending_order so we can order our channel_reestablish responses
                        self.monitor_pending_revoke_and_ack = false;
                        self.monitor_pending_commitment_signed = false;
                        return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures);
  
                self.monitor_pending_revoke_and_ack = false;
                self.monitor_pending_commitment_signed = false;
-               (raa, commitment_update, self.monitor_pending_order.clone().unwrap(), forwards, failures)
+               let order = self.resend_order.clone();
+               log_trace!(self, "Restored monitor updating resulting in {} commitment update and {} RAA, with {} first",
+                       if commitment_update.is_some() { "a" } else { "no" },
+                       if raa.is_some() { "an" } else { "no" },
+                       match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
+               (raa, commitment_update, order, forwards, failures)
        }
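A toy model of the failure/restore contract, using stand-in types rather than the real Channel machinery; note that monitor_update_failed no longer takes an order argument, since the channel now consults its own resend_order when monitor_updating_restored regenerates the dropped messages:

```rust
use std::mem;

#[derive(Clone, Copy, Debug, PartialEq)]
enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

// Stand-in for Channel; forwards are modeled as plain u64s in place of
// (PendingForwardHTLCInfo, u64).
#[derive(Default)]
struct ChannelToy {
    resend_order_raa_first: bool,
    monitor_pending_raa: bool,
    monitor_pending_cs: bool,
    monitor_pending_forwards: Vec<u64>,
}

impl ChannelToy {
    // The generated messages were dropped, not sent; record what must be
    // regenerated once the monitor update finally persists.
    fn monitor_update_failed(&mut self, resend_raa: bool, resend_cs: bool,
                             mut forwards: Vec<u64>) {
        self.monitor_pending_raa = resend_raa;
        self.monitor_pending_cs = resend_cs;
        assert!(self.monitor_pending_forwards.is_empty());
        mem::swap(&mut forwards, &mut self.monitor_pending_forwards);
    }

    // Returns which messages to regenerate, plus the order to send them.
    fn monitor_updating_restored(&mut self) -> (bool, bool, RAACommitmentOrder, Vec<u64>) {
        let forwards = mem::replace(&mut self.monitor_pending_forwards, Vec::new());
        let raa = mem::replace(&mut self.monitor_pending_raa, false);
        let cs = mem::replace(&mut self.monitor_pending_cs, false);
        let order = if self.resend_order_raa_first {
            RAACommitmentOrder::RevokeAndACKFirst
        } else {
            RAACommitmentOrder::CommitmentFirst
        };
        (raa, cs, order, forwards)
    }
}

fn main() {
    let mut chan = ChannelToy { resend_order_raa_first: true, ..Default::default() };
    chan.monitor_update_failed(true, true, vec![7]);
    let (raa, cs, order, forwards) = chan.monitor_updating_restored();
    assert!(raa && cs);
    assert_eq!(order, RAACommitmentOrder::RevokeAndACKFirst);
    assert_eq!(forwards, vec![7]);
}
```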
  
        pub fn update_fee(&mut self, fee_estimator: &FeeEstimator, msg: &msgs::UpdateFee) -> Result<(), ChannelError> {
                        })
                } else { None };
  
-               let order = self.monitor_pending_order.clone().unwrap_or(if self.received_commitment_while_awaiting_raa {
-                               RAACommitmentOrder::CommitmentFirst
-                       } else {
-                               RAACommitmentOrder::RevokeAndACKFirst
-                       });
                if msg.next_local_commitment_number == our_next_remote_commitment_number {
                        if required_revoke.is_some() {
                                log_debug!(self, "Reconnected channel {} with only lost outbound RAA", log_bytes!(self.channel_id()));
                                log_debug!(self, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
                        }
  
-                       if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 &&
-                                       self.monitor_pending_order.is_none() { // monitor_pending_order indicates we're waiting on a response to a unfreeze
+                       if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) == 0 {
                                // We're up-to-date and not waiting on a remote revoke (if we are our
                                // channel_reestablish should result in them sending a revoke_and_ack), but we may
                                // have received some updates while we were disconnected. Free the holding cell
                                match self.free_holding_cell_htlcs() {
                                        Err(ChannelError::Close(msg)) => return Err(ChannelError::Close(msg)),
                                        Err(ChannelError::Ignore(_)) => panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
-                                       Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), order, shutdown_msg)),
-                                       Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg)),
+                                       Ok(Some((commitment_update, channel_monitor))) => return Ok((resend_funding_locked, required_revoke, Some(commitment_update), Some(channel_monitor), self.resend_order.clone(), shutdown_msg)),
+                                       Ok(None) => return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg)),
                                }
                        } else {
-                               return Ok((resend_funding_locked, required_revoke, None, None, order, shutdown_msg));
+                               return Ok((resend_funding_locked, required_revoke, None, None, self.resend_order.clone(), shutdown_msg));
                        }
                } else if msg.next_local_commitment_number == our_next_remote_commitment_number - 1 {
                        if required_revoke.is_some() {
  
                        if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) != 0 {
                                self.monitor_pending_commitment_signed = true;
-                               return Ok((resend_funding_locked, None, None, None, order, shutdown_msg));
+                               return Ok((resend_funding_locked, None, None, None, self.resend_order.clone(), shutdown_msg));
                        }
  
-                       return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, order, shutdown_msg));
+                       return Ok((resend_funding_locked, required_revoke, Some(self.get_last_commitment_update()), None, self.resend_order.clone(), shutdown_msg));
                } else {
                        return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction"));
                }
                                htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(fail_reason);
                        }
                }
+               self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
  
                let (res, remote_commitment_tx, htlcs) = match self.send_commitment_no_state_update() {
                        Ok((res, (remote_commitment_tx, mut htlcs))) => {
@@@ -3558,8 -3550,6 +3543,6 @@@ impl Writeable for Channel 
                self.cur_remote_commitment_transaction_number.write(writer)?;
                self.value_to_self_msat.write(writer)?;
  
-               self.received_commitment_while_awaiting_raa.write(writer)?;
                let mut dropped_inbound_htlcs = 0;
                for htlc in self.pending_inbound_htlcs.iter() {
                        if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
                        }
                }
  
+               match self.resend_order {
+                       RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
+                       RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
+               }
                self.monitor_pending_revoke_and_ack.write(writer)?;
                self.monitor_pending_commitment_signed.write(writer)?;
-               match self.monitor_pending_order {
-                       None => 0u8.write(writer)?,
-                       Some(RAACommitmentOrder::CommitmentFirst) => 1u8.write(writer)?,
-                       Some(RAACommitmentOrder::RevokeAndACKFirst) => 2u8.write(writer)?,
-               }
  
                (self.monitor_pending_forwards.len() as u64).write(writer)?;
                for &(ref pending_forward, ref htlc_id) in self.monitor_pending_forwards.iter() {
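resend_order is now serialized as a single byte (0 = CommitmentFirst, 1 = RevokeAndACKFirst) written ahead of the two monitor_pending_* flags, replacing the old three-valued Option<RAACommitmentOrder> encoding and the dropped received_commitment_while_awaiting_raa bool, so the layout is not compatible with previously serialized channels. A minimal round-trip sketch with stand-in types (DecodeError here is illustrative):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

#[derive(Debug)]
struct DecodeError;

// Mirrors the Writeable arm: one byte per variant.
fn write_order(order: RAACommitmentOrder, out: &mut Vec<u8>) {
    out.push(match order {
        RAACommitmentOrder::CommitmentFirst => 0u8,
        RAACommitmentOrder::RevokeAndACKFirst => 1u8,
    });
}

// Mirrors the ReadableArgs arm: any other byte is an invalid value.
fn read_order(byte: u8) -> Result<RAACommitmentOrder, DecodeError> {
    match byte {
        0 => Ok(RAACommitmentOrder::CommitmentFirst),
        1 => Ok(RAACommitmentOrder::RevokeAndACKFirst),
        _ => Err(DecodeError),
    }
}

fn main() {
    let mut buf = Vec::new();
    write_order(RAACommitmentOrder::RevokeAndACKFirst, &mut buf);
    assert_eq!(read_order(buf[0]).unwrap(), RAACommitmentOrder::RevokeAndACKFirst);
    assert!(read_order(2).is_err());
}
```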
@@@ -3763,8 -3753,6 +3746,6 @@@ impl<R : ::std::io::Read> ReadableArgs<
                let cur_remote_commitment_transaction_number = Readable::read(reader)?;
                let value_to_self_msat = Readable::read(reader)?;
  
-               let received_commitment_while_awaiting_raa = Readable::read(reader)?;
                let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
                let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, OUR_MAX_HTLCS as usize));
                for _ in 0..pending_inbound_htlc_count {
                        });
                }
  
-               let monitor_pending_revoke_and_ack = Readable::read(reader)?;
-               let monitor_pending_commitment_signed = Readable::read(reader)?;
-               let monitor_pending_order = match <u8 as Readable<R>>::read(reader)? {
-                       0 => None,
-                       1 => Some(RAACommitmentOrder::CommitmentFirst),
-                       2 => Some(RAACommitmentOrder::RevokeAndACKFirst),
+               let resend_order = match <u8 as Readable<R>>::read(reader)? {
+                       0 => RAACommitmentOrder::CommitmentFirst,
+                       1 => RAACommitmentOrder::RevokeAndACKFirst,
                        _ => return Err(DecodeError::InvalidValue),
                };
  
+               let monitor_pending_revoke_and_ack = Readable::read(reader)?;
+               let monitor_pending_commitment_signed = Readable::read(reader)?;
                let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
                let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, OUR_MAX_HTLCS as usize));
                for _ in 0..monitor_pending_forwards_count {
                        cur_remote_commitment_transaction_number,
                        value_to_self_msat,
  
-                       received_commitment_while_awaiting_raa,
                        pending_inbound_htlcs,
                        pending_outbound_htlcs,
                        holding_cell_htlc_updates,
  
+                       resend_order,
                        monitor_pending_revoke_and_ack,
                        monitor_pending_commitment_signed,
-                       monitor_pending_order,
                        monitor_pending_forwards,
                        monitor_pending_failures,