Merge pull request #674 from TheBlueMatt/2020-08-keyif-rand-names
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Wed, 26 Aug 2020 15:07:58 +0000 (08:07 -0700)
committer GitHub <noreply@github.com>
Wed, 26 Aug 2020 15:07:58 +0000 (08:07 -0700)
Simplify + clarify random-bytes-fetching from KeysInterface
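
This merge collapses the two special-purpose randomness fetchers on
KeysInterface, get_onion_rand() and get_channel_id(), into a single
general-purpose get_secure_random_bytes(). A minimal sketch of the trait
surface before and after, paraphrased from the hunks below (surrounding
trait items elided):

    // Before: two narrowly-named methods, one of which bundled a session
    // SecretKey together with a PRNG seed.
    fn get_onion_rand(&self) -> (SecretKey, [u8; 32]);
    fn get_channel_id(&self) -> [u8; 32];

    // After: one fetcher for secure random bytes; callers derive whatever
    // key material they need from it.
    fn get_secure_random_bytes(&self) -> [u8; 32];

Call sites that previously consumed get_onion_rand() now build the session
key themselves, as in the channelmanager.rs hunk below:

    let prng_seed = self.keys_manager.get_secure_random_bytes();
    let session_priv = SecretKey::from_slice(
        &self.keys_manager.get_secure_random_bytes()[..]).expect("RNG is busted");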

fuzz/src/chanmon_consistency.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_tests.rs
lightning/src/util/test_utils.rs

diff --cc fuzz/src/chanmon_consistency.rs
index 0fc77c58f56c4bd1c8d6d2584639da30bfeb9d42,16e8cdf65cf791f34ec47dcc63d25d4f2f180b72..65f721e484032c1965aea8c2a54141ec63e5e063
@@@ -34,7 -34,7 +34,7 @@@ use lightning::chain::transaction::OutP
  use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil,ChainWatchInterface};
  use lightning::chain::keysinterface::{KeysInterface, InMemoryChannelKeys};
  use lightning::ln::channelmonitor;
 -use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
 +use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, MonitorEvent};
  use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, PaymentSecret, ChannelManagerReadArgs};
  use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
  use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, UpdateAddHTLC, Init};
@@@ -135,15 -135,14 +135,14 @@@ impl channelmonitor::ManyChannelMonito
                self.update_ret.lock().unwrap().clone()
        }
  
 -      fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
 -              return self.simple_monitor.get_and_clear_pending_htlcs_updated();
 +      fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
 +              return self.simple_monitor.get_and_clear_pending_monitor_events();
        }
  }
  
  struct KeyProvider {
        node_id: u8,
-       session_id: atomic::AtomicU8,
-       channel_id: atomic::AtomicU8,
+       rand_bytes_id: atomic::AtomicU8,
  }
  impl KeysInterface for KeyProvider {
        type ChanKeySigner = EnforcingChannelKeys;
                ))
        }
  
-       fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) {
-               let id = self.session_id.fetch_add(1, atomic::Ordering::Relaxed);
-               (SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 10, self.node_id]).unwrap(),
-               [0; 32])
-       }
-       fn get_channel_id(&self) -> [u8; 32] {
-               let id = self.channel_id.fetch_add(1, atomic::Ordering::Relaxed);
+       fn get_secure_random_bytes(&self) -> [u8; 32] {
+               let id = self.rand_bytes_id.fetch_add(1, atomic::Ordering::Relaxed);
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
        }
  }
@@@ -202,7 -195,7 +195,7 @@@ pub fn do_test<Out: test_logger::Output
                        let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
                        let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
  
-                       let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
+                       let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) });
                        let mut config = UserConfig::default();
                        config.channel_options.fee_proportional_millionths = 0;
                        config.channel_options.announced_channel = true;
                        let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin));
                        let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
  
-                       let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
+                       let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU8::new(0) });
                        let mut config = UserConfig::default();
                        config.channel_options.fee_proportional_millionths = 0;
                        config.channel_options.announced_channel = true;
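
A note on the fuzz KeyProvider above: its two per-purpose counters
(session_id, channel_id) collapse into one rand_bytes_id, so the fuzzer's
output stays deterministic across runs while remaining distinct per call. A
hypothetical illustration (node_id 9 chosen arbitrarily):

    let kp = KeyProvider { node_id: 9, rand_bytes_id: atomic::AtomicU8::new(0) };
    let a = kp.get_secure_random_bytes(); // ends ..., 0, 11, 9
    let b = kp.get_secure_random_bytes(); // ends ..., 1, 11, 9
    assert_ne!(a, b); // only the counter byte differs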
diff --cc lightning/src/ln/channel.rs
index 9d4ccac896cd764b4c9965916014e2e5313200c7,6079c47ee870e8d0d2e74e9d72cd27a31e074b6f..f7f138274331e1b167bb2158cd952c77e7d257c5
@@@ -386,6 -386,9 +386,6 @@@ pub(super) struct Channel<ChanSigner: C
  
        their_shutdown_scriptpubkey: Option<Script>,
  
 -      /// Used exclusively to broadcast the latest local state, mostly a historical quirk that this
 -      /// is here:
 -      channel_monitor: Option<ChannelMonitor<ChanSigner>>,
        commitment_secrets: CounterpartyCommitmentSecrets,
  
        network_sync: UpdateStatus,
@@@ -488,7 -491,7 +488,7 @@@ impl<ChanSigner: ChannelKeys> Channel<C
                        user_id: user_id,
                        config: config.channel_options.clone(),
  
-                       channel_id: keys_provider.get_channel_id(),
+                       channel_id: keys_provider.get_secure_random_bytes(),
                        channel_state: ChannelState::OurInitSent as u32,
                        channel_outbound: true,
                        secp_ctx: Secp256k1::new(),
  
                        their_shutdown_scriptpubkey: None,
  
 -                      channel_monitor: None,
                        commitment_secrets: CounterpartyCommitmentSecrets::new(),
  
                        network_sync: UpdateStatus::Fresh,
  
                        their_shutdown_scriptpubkey,
  
 -                      channel_monitor: None,
                        commitment_secrets: CounterpartyCommitmentSecrets::new(),
  
                        network_sync: UpdateStatus::Fresh,
                                payment_preimage: payment_preimage_arg.clone(),
                        }],
                };
 -              self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone(), logger).unwrap();
  
                if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 {
                        for pending_update in self.holding_cell_htlc_updates.iter() {
                        } }
                }
  
 -              self.channel_monitor = Some(create_monitor!());
                let channel_monitor = create_monitor!();
  
                self.channel_state = ChannelState::FundingSent as u32;
                        } }
                }
  
 -              self.channel_monitor = Some(create_monitor!());
                let channel_monitor = create_monitor!();
  
                assert_eq!(self.channel_state & (ChannelState::MonitorUpdateFailed as u32), 0); // We have not had any monitor(s) yet to fail an update!
                                htlc_outputs: htlcs_and_sigs
                        }]
                };
 -              self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone(), logger).unwrap();
  
                for htlc in self.pending_inbound_htlcs.iter_mut() {
                        let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
                                secret: msg.per_commitment_secret,
                        }],
                };
 -              self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone(), logger).unwrap();
  
                // Update state now that we've passed all the can-fail calls...
                // (note that we may still fail to generate the new commitment_signed message, but that's
                self.user_id
        }
  
 -      /// May only be called after funding has been initiated (ie is_funding_initiated() is true)
 -      pub fn channel_monitor(&mut self) -> &mut ChannelMonitor<ChanSigner> {
 -              if self.channel_state < ChannelState::FundingSent as u32 {
 -                      panic!("Can't get a channel monitor until funding has been created");
 -              }
 -              self.channel_monitor.as_mut().unwrap()
 -      }
 -
        /// Guaranteed to be Some after both FundingLocked messages have been exchanged (and, thus,
        /// is_usable() returns true).
        /// Allowed in any state (including after shutdown)
                if header.bitcoin_hash() != self.last_block_connected {
                        self.last_block_connected = header.bitcoin_hash();
                        self.update_time_counter = cmp::max(self.update_time_counter, header.time);
 -                      if let Some(channel_monitor) = self.channel_monitor.as_mut() {
 -                              channel_monitor.last_block_hash = self.last_block_connected;
 -                      }
                        if self.funding_tx_confirmations > 0 {
                                if self.funding_tx_confirmations == self.minimum_depth as u64 {
                                        let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
                        self.funding_tx_confirmations = self.minimum_depth as u64 - 1;
                }
                self.last_block_connected = header.bitcoin_hash();
 -              if let Some(channel_monitor) = self.channel_monitor.as_mut() {
 -                      channel_monitor.last_block_hash = self.last_block_connected;
 -              }
                false
        }
  
                                their_revocation_point: self.their_cur_commitment_point.unwrap()
                        }]
                };
 -              self.channel_monitor.as_mut().unwrap().update_monitor_ooo(monitor_update.clone(), logger).unwrap();
                self.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
                Ok((res, monitor_update))
        }
@@@ -4217,6 -4242,8 +4217,6 @@@ impl<ChanSigner: ChannelKeys + Writeabl
                self.their_shutdown_scriptpubkey.write(writer)?;
  
                self.commitment_secrets.write(writer)?;
 -
 -              self.channel_monitor.as_ref().unwrap().write_for_disk(writer)?;
                Ok(())
        }
  }
@@@ -4371,6 -4398,13 +4371,6 @@@ impl<ChanSigner: ChannelKeys + Readable
                let their_shutdown_scriptpubkey = Readable::read(reader)?;
                let commitment_secrets = Readable::read(reader)?;
  
 -              let (monitor_last_block, channel_monitor) = Readable::read(reader)?;
 -              // We drop the ChannelMonitor's last block connected hash cause we don't actually bother
 -              // doing full block connection operations on the internal ChannelMonitor copies
 -              if monitor_last_block != last_block_connected {
 -                      return Err(DecodeError::InvalidValue);
 -              }
 -
                Ok(Channel {
                        user_id,
  
  
                        their_shutdown_scriptpubkey,
  
 -                      channel_monitor: Some(channel_monitor),
                        commitment_secrets,
  
                        network_sync: UpdateStatus::Fresh,
@@@ -4520,8 -4555,7 +4520,7 @@@ mod tests 
                fn get_channel_keys(&self, _inbound: bool, _channel_value_satoshis: u64) -> InMemoryChannelKeys {
                        self.chan_keys.clone()
                }
-               fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) { panic!(); }
-               fn get_channel_id(&self) -> [u8; 32] { [0; 32] }
+               fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
        }
  
        fn public_from_secret_hex(secp_ctx: &Secp256k1<All>, hex: &str) -> PublicKey {
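
The channel.rs hunks above also drop the ChannelMonitor copy that Channel
carried around ("mostly a historical quirk", per the removed comment): the
update_monitor_ooo() calls, the channel_monitor() accessor, the
last_block_hash bookkeeping, and the monitor embedded in the channel's
serialization all go away. In particular, deserialization no longer does
this (reconstructed from the removed lines above):

    // Previously Channel::read pulled a full monitor back out of the stream
    // and cross-checked its last connected block against the channel's:
    let (monitor_last_block, channel_monitor) = Readable::read(reader)?;
    if monitor_last_block != last_block_connected {
        return Err(DecodeError::InvalidValue);
    }

The monitor is now owned solely by the ManyChannelMonitor, and Channel
(de)serialization carries only the channel's own state.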
diff --cc lightning/src/ln/channelmanager.rs
index 010505ef10afa116f2d446800d0bc50b327fb1f3,ccd0b00444ae3c4f5d6d48cb74933013ea6faa3f..fdbbb43da3dd104ee5bd6699937af92df6dc4f7d
@@@ -38,7 -38,7 +38,7 @@@ use bitcoin::secp256k1
  use chain::chaininterface::{BroadcasterInterface,ChainListener,FeeEstimator};
  use chain::transaction::OutPoint;
  use ln::channel::{Channel, ChannelError};
 -use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 +use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateErr, ManyChannelMonitor, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
  use ln::features::{InitFeatures, NodeFeatures};
  use routing::router::{Route, RouteHop};
  use ln::msgs;
@@@ -1243,7 -1243,8 +1243,8 @@@ impl<ChanSigner: ChannelKeys, M: Deref
        // Only public for testing; this should otherwise never be called directly
        pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
                log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
-               let (session_priv, prng_seed) = self.keys_manager.get_onion_rand();
+               let prng_seed = self.keys_manager.get_secure_random_bytes();
+               let session_priv = SecretKey::from_slice(&self.keys_manager.get_secure_random_bytes()[..]).expect("RNG is busted");
  
                let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
                        .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
                        Err(e) => { Err(APIError::APIMisuseError { err: e.err })}
                }
        }
 +
 +      /// Process pending events from the ManyChannelMonitor.
 +      fn process_pending_monitor_events(&self) {
 +              let mut failed_channels = Vec::new();
 +              {
 +                      for monitor_event in self.monitor.get_and_clear_pending_monitor_events() {
 +                              match monitor_event {
 +                                      MonitorEvent::HTLCEvent(htlc_update) => {
 +                                              if let Some(preimage) = htlc_update.payment_preimage {
 +                                                      log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
 +                                                      self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
 +                                              } else {
 +                                                      log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 +                                                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 +                                              }
 +                                      },
 +                                      MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
 +                                              let mut channel_lock = self.channel_state.lock().unwrap();
 +                                              let channel_state = &mut *channel_lock;
 +                                              let by_id = &mut channel_state.by_id;
 +                                              let short_to_id = &mut channel_state.short_to_id;
 +                                              let pending_msg_events = &mut channel_state.pending_msg_events;
 +                                              if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
 +                                                      if let Some(short_id) = chan.get_short_channel_id() {
 +                                                              short_to_id.remove(&short_id);
 +                                                      }
 +                                                      failed_channels.push(chan.force_shutdown(false));
 +                                                      if let Ok(update) = self.get_channel_update(&chan) {
 +                                                              pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                                      msg: update
 +                                                              });
 +                                                      }
 +                                              }
 +                                      },
 +                              }
 +                      }
 +              }
 +
 +              for failure in failed_channels.drain(..) {
 +                      self.finish_force_close_channel(failure);
 +              }
 +      }
  }
  
  impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> events::MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
                                L::Target: Logger,
  {
        fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
 -              // TODO: Event release to users and serialization is currently race-y: it's very easy for a
 -              // user to serialize a ChannelManager with pending events in it and lose those events on
 -              // restart. This is doubly true for the fail/fulfill-backs from monitor events!
 -              {
 -                      //TODO: This behavior should be documented.
 -                      for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
 -                              if let Some(preimage) = htlc_update.payment_preimage {
 -                                      log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
 -                                      self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
 -                              } else {
 -                                      log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 -                                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 -                              }
 -                      }
 -              }
 +              //TODO: This behavior should be documented. It's non-intuitive that we query
 +              // ChannelMonitors when clearing other events.
 +              self.process_pending_monitor_events();
  
                let mut ret = Vec::new();
                let mut channel_state = self.channel_state.lock().unwrap();
@@@ -3037,9 -3008,21 +3038,9 @@@ impl<ChanSigner: ChannelKeys, M: Deref
                                L::Target: Logger,
  {
        fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
 -              // TODO: Event release to users and serialization is currently race-y: it's very easy for a
 -              // user to serialize a ChannelManager with pending events in it and lose those events on
 -              // restart. This is doubly true for the fail/fulfill-backs from monitor events!
 -              {
 -                      //TODO: This behavior should be documented.
 -                      for htlc_update in self.monitor.get_and_clear_pending_htlcs_updated() {
 -                              if let Some(preimage) = htlc_update.payment_preimage {
 -                                      log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
 -                                      self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
 -                              } else {
 -                                      log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
 -                                      self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 -                              }
 -                      }
 -              }
 +              //TODO: This behavior should be documented. It's non-intuitive that we query
 +              // ChannelMonitors when clearing other events.
 +              self.process_pending_monitor_events();
  
                let mut ret = Vec::new();
                let mut pending_events = self.pending_events.lock().unwrap();
@@@ -3122,6 -3105,21 +3123,6 @@@ impl<ChanSigner: ChannelKeys, M: Deref 
                                                }
                                        }
                                }
 -                              if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height, &self.logger) {
 -                                      if let Some(short_id) = channel.get_short_channel_id() {
 -                                              short_to_id.remove(&short_id);
 -                                      }
 -                                      // If would_broadcast_at_height() is true, the channel_monitor will broadcast
 -                                      // the latest local tx for us, so we should skip that here (it doesn't really
 -                                      // hurt anything, but does make tests a bit simpler).
 -                                      failed_channels.push(channel.force_shutdown(false));
 -                                      if let Ok(update) = self.get_channel_update(&channel) {
 -                                              pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                                                      msg: update
 -                                              });
 -                                      }
 -                                      return false;
 -                              }
                                true
                        });
  
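For reference, the MonitorEvent consumed above replaces the bare HTLCUpdate
that get_and_clear_pending_htlcs_updated() used to return. A sketch of its
assumed shape, inferred from the two match arms in
process_pending_monitor_events() (the actual definition lives in
channelmonitor.rs, outside this diff):

    pub enum MonitorEvent {
        // An HTLC resolved on-chain: carries the preimage or failing hash
        // plus the HTLC's source, so it can be claimed or failed backwards.
        HTLCEvent(HTLCUpdate),
        // A channel's commitment transaction was broadcast; identifies the
        // channel by its funding outpoint.
        CommitmentTxBroadcasted(OutPoint),
    }

Factoring the handling into process_pending_monitor_events() also removes
the copy of this logic that was previously duplicated between
get_and_clear_pending_msg_events() and get_and_clear_pending_events().
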
diff --cc lightning/src/ln/functional_tests.rs
index 7a410c8bf7267550a81da22e460176079e8f783e,d21f558a65f5c365414171005167ea82ad55de84..4b111f2af455b1a3842965cad2941786ec608383
@@@ -2420,21 -2420,13 +2420,21 @@@ fn channel_monitor_network_test() 
        // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
        // buffer space).
  
 -      {
 +      let (close_chan_update_1, close_chan_update_2) = {
                let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                nodes[3].block_notifier.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
                for i in 3..TEST_FINAL_CLTV + 2 + LATENCY_GRACE_PERIOD_BLOCKS + 1 {
                        header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        nodes[3].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
                }
 +              let events = nodes[3].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              let close_chan_update_1 = match events[0] {
 +                      MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
 +                              msg.clone()
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              };
                check_added_monitors!(nodes[3], 1);
  
                // Clear bumped claiming txn spending node 2's commitment tx. Bumped txn are generated after reaching some height timer.
                        header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        nodes[4].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
                }
 -
 +              let events = nodes[4].node.get_and_clear_pending_msg_events();
 +              assert_eq!(events.len(), 1);
 +              let close_chan_update_2 = match events[0] {
 +                      MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
 +                              msg.clone()
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              };
                check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
  
                nodes[4].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
  
                check_preimage_claim(&nodes[4], &node_txn);
 -      }
 -      get_announce_close_broadcast_events(&nodes, 3, 4);
 +              (close_chan_update_1, close_chan_update_2)
 +      };
 +      nodes[3].net_graph_msg_handler.handle_channel_update(&close_chan_update_2).unwrap();
 +      nodes[4].net_graph_msg_handler.handle_channel_update(&close_chan_update_1).unwrap();
        assert_eq!(nodes[3].node.list_channels().len(), 0);
        assert_eq!(nodes[4].node.list_channels().len(), 0);
  }
@@@ -6143,7 -6126,7 +6143,7 @@@ fn test_onion_failure() 
        let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
        let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
        for node in nodes.iter() {
-               *node.keys_manager.override_session_priv.lock().unwrap() = Some(SecretKey::from_slice(&[3; 32]).unwrap());
+               *node.keys_manager.override_session_priv.lock().unwrap() = Some([3; 32]);
        }
        let channels = [create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()), create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known())];
        let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
diff --cc lightning/src/util/test_utils.rs
index 5d14b861f5c5050f6fef3db2b34009c74959778a,559277d2a68c674948479d329861e2471d719c81..78263ecacad311ff8590d62aa740fc9cbb412794
@@@ -15,7 -15,7 +15,7 @@@ use ln::channelmonitor
  use ln::features::{ChannelFeatures, InitFeatures};
  use ln::msgs;
  use ln::msgs::OptionalField;
 -use ln::channelmonitor::HTLCUpdate;
 +use ln::channelmonitor::MonitorEvent;
  use util::enforcing_trait_impls::EnforcingChannelKeys;
  use util::events;
  use util::logger::{Logger, Level, Record};
@@@ -129,8 -129,8 +129,8 @@@ impl<'a> channelmonitor::ManyChannelMon
                ret
        }
  
 -      fn get_and_clear_pending_htlcs_updated(&self) -> Vec<HTLCUpdate> {
 -              return self.simple_monitor.get_and_clear_pending_htlcs_updated();
 +      fn get_and_clear_pending_monitor_events(&self) -> Vec<MonitorEvent> {
 +              return self.simple_monitor.get_and_clear_pending_monitor_events();
        }
  }
  
@@@ -350,7 -350,7 +350,7 @@@ impl Logger for TestLogger 
  
  pub struct TestKeysInterface {
        backing: keysinterface::KeysManager,
-       pub override_session_priv: Mutex<Option<SecretKey>>,
+       pub override_session_priv: Mutex<Option<[u8; 32]>>,
        pub override_channel_id_priv: Mutex<Option<[u8; 32]>>,
  }
  
@@@ -364,18 -364,19 +364,19 @@@ impl keysinterface::KeysInterface for T
                EnforcingChannelKeys::new(self.backing.get_channel_keys(inbound, channel_value_satoshis))
        }
  
-       fn get_onion_rand(&self) -> (SecretKey, [u8; 32]) {
-               match *self.override_session_priv.lock().unwrap() {
-                       Some(key) => (key.clone(), [0; 32]),
-                       None => self.backing.get_onion_rand()
+       fn get_secure_random_bytes(&self) -> [u8; 32] {
+               let override_channel_id = self.override_channel_id_priv.lock().unwrap();
+               let override_session_key = self.override_session_priv.lock().unwrap();
+               if override_channel_id.is_some() && override_session_key.is_some() {
+                       panic!("We don't know which override key to use!");
                }
-       }
-       fn get_channel_id(&self) -> [u8; 32] {
-               match *self.override_channel_id_priv.lock().unwrap() {
-                       Some(key) => key.clone(),
-                       None => self.backing.get_channel_id()
+               if let Some(key) = &*override_channel_id {
+                       return *key;
+               }
+               if let Some(key) = &*override_session_key {
+                       return *key;
                }
+               self.backing.get_secure_random_bytes()
        }
  }
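
A hypothetical usage sketch of the override plumbing above, mirroring
test_onion_failure() in functional_tests.rs (the TestKeysInterface
constructor and its arguments are assumed, not shown in this diff):

    // Pin every "random" fetch to a fixed session key for the test.
    let keys = TestKeysInterface::new(&[0u8; 32], Network::Testnet);
    *keys.override_session_priv.lock().unwrap() = Some([3; 32]);
    assert_eq!(keys.get_secure_random_bytes(), [3; 32]);
    // Setting override_channel_id_priv at the same time would trip the
    // "We don't know which override key to use!" panic above.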