Merge pull request #890 from TheBlueMatt/2021-04-fix-chan-shutdown-crash
[rust-lightning] / lightning / src / ln / channelmanager.rs
index a759443d86ed9ab56374c3d44d24ad8affa6a5a9..80edbacf573a7a545107a7908698fb0bf371cb13 100644
@@ -18,7 +18,8 @@
 //! imply it needs to fail HTLCs/payments/channels it manages).
 //!
 
-use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
 
@@ -27,7 +28,7 @@ use bitcoin::hashes::hmac::{Hmac, HmacEngine};
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hashes::cmp::fixed_time_eq;
-use bitcoin::hash_types::BlockHash;
+use bitcoin::hash_types::{BlockHash, Txid};
 
 use bitcoin::secp256k1::key::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
@@ -35,10 +36,14 @@ use bitcoin::secp256k1::ecdh::SharedSecret;
 use bitcoin::secp256k1;
 
 use chain;
+use chain::Confirm;
 use chain::Watch;
 use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
+// Since this struct is returned in `list_channels` methods, expose it here in case users want to
+// construct one themselves.
+pub use ln::channel::CounterpartyForwardingInfo;
 use ln::channel::{Channel, ChannelError};
 use ln::features::{InitFeatures, NodeFeatures};
 use routing::router::{Route, RouteHop};
@@ -206,7 +211,7 @@ pub struct PaymentPreimage(pub [u8;32]);
 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
 pub struct PaymentSecret(pub [u8;32]);
 
-type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
 
 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
@@ -333,15 +338,21 @@ pub(super) struct ChannelHolder<Signer: Sign> {
        pub(super) pending_msg_events: Vec<MessageSendEvent>,
 }
 
+/// Events which we process internally but cannot be processed immediately at the generation site
+/// for some reason. They are handled in timer_tick_occurred, so may be processed with
+/// quite some time lag.
+enum BackgroundEvent {
+       /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its latest holder
+       /// commitment transaction.
+       ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+}
+
 /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
 /// the latest Init features we heard from the peer.
 struct PeerState {
        latest_features: InitFeatures,
 }
 
-#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
-const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
-
 /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
@@ -349,7 +360,7 @@ const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assum
 /// issues such as overly long function definitions. Note that the ChannelManager can take any
 /// type that implements KeysInterface for its keys manager, but this type alias chooses the
 /// concrete type of the KeysManager.
-pub type SimpleArcChannelManager<M, T, F, L> = Arc<ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>>;
+pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
 
 /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
 /// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
@@ -380,7 +391,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 /// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
 /// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
 ///
-/// Note that the deserializer is only implemented for (Sha256dHash, ChannelManager), which
+/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
 /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
 /// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
 /// block_connected() to step towards your best block) upon deserialization before using the
@@ -390,7 +401,7 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 /// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
 /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
 /// offline for a full minute. In order to track this, you must call
-/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
+/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
 ///
 /// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
 /// or a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
@@ -411,10 +422,9 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        tx_broadcaster: T,
 
        #[cfg(test)]
-       pub(super) latest_block_height: AtomicUsize,
+       pub(super) best_block: RwLock<BestBlock>,
        #[cfg(not(test))]
-       latest_block_height: AtomicUsize,
-       last_block_hash: Mutex<BlockHash>,
+       best_block: RwLock<BestBlock>,
        secp_ctx: Secp256k1<secp256k1::All>,
 
        #[cfg(any(test, feature = "_test_utils"))]
@@ -422,6 +432,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        #[cfg(not(any(test, feature = "_test_utils")))]
        channel_state: Mutex<ChannelHolder<Signer>>,
        our_network_key: SecretKey,
+       our_network_pubkey: PublicKey,
 
        /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
        /// value increases strictly since we don't assume access to a time source.
@@ -436,6 +447,7 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
 
        pending_events: Mutex<Vec<events::Event>>,
+       pending_background_events: Mutex<Vec<BackgroundEvent>>,
        /// Used when we have to take a BIG lock to make sure everything is self-consistent.
        /// Essentially just when we're serializing ourselves out.
        /// Taken first everywhere where we are making changes before any other locks.
@@ -451,11 +463,55 @@ pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref,
        logger: L,
 }
 
+/// Chain-related parameters used to construct a new `ChannelManager`.
+///
+/// Typically, the block-specific parameters are derived from the best block hash for the network,
+/// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
+/// are not needed when deserializing a previously constructed `ChannelManager`.
+pub struct ChainParameters {
+       /// The network for determining the `chain_hash` in Lightning messages.
+       pub network: Network,
+
+       /// The hash and height of the latest block successfully connected.
+       ///
+       /// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
+       pub best_block: BestBlock,
+}
+
+/// The best known block as identified by its hash and height.
+#[derive(Clone, Copy)]
+pub struct BestBlock {
+       block_hash: BlockHash,
+       height: u32,
+}
+
+impl BestBlock {
+       /// Returns the best block from the genesis of the given network.
+       pub fn from_genesis(network: Network) -> Self {
+               BestBlock {
+                       block_hash: genesis_block(network).header.block_hash(),
+                       height: 0,
+               }
+       }
+
+       /// Returns the best block as identified by the given block hash and height.
+       pub fn new(block_hash: BlockHash, height: u32) -> Self {
+               BestBlock { block_hash, height }
+       }
+
+       /// Returns the best block hash.
+       pub fn block_hash(&self) -> BlockHash { self.block_hash }
+
+       /// Returns the best block height.
+       pub fn height(&self) -> u32 { self.height }
+}
+
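
For orientation, a brief usage sketch of the two new types. The helper names and `params_at_tip`'s arguments are illustrative stand-ins for values from whatever block source the node uses; only `BestBlock` and `ChainParameters` come from the diff above.

```rust
use bitcoin::hash_types::BlockHash;
use bitcoin::network::constants::Network;
use lightning::ln::channelmanager::{BestBlock, ChainParameters};

// A brand-new node has created no channels yet, so it can start at genesis.
fn params_for_fresh_node(network: Network) -> ChainParameters {
    ChainParameters { network, best_block: BestBlock::from_genesis(network) }
}

// A node whose chain source is already synced starts from the known tip.
fn params_at_tip(network: Network, tip_hash: BlockHash, tip_height: u32) -> ChainParameters {
    ChainParameters { network, best_block: BestBlock::new(tip_hash, tip_height) }
}
```
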
 /// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
-/// desirable to notify any listeners on `wait_timeout`/`wait` that new updates are available for
-/// persistence. Therefore, this struct is responsible for locking the total consistency lock and,
-/// upon going out of scope, sending the aforementioned notification (since the lock being released
-/// indicates that the updates are ready for persistence).
+/// desirable to notify any listeners on `await_persistable_update_timeout`/
+/// `await_persistable_update` that new updates are available for persistence. Therefore, this
+/// struct is responsible for locking the total consistency lock and, upon going out of scope,
+/// sending the aforementioned notification (since the lock being released indicates that the
+/// updates are ready for persistence).
 struct PersistenceNotifierGuard<'a> {
        persistence_notifier: &'a PersistenceNotifier,
        // We hold onto this result so the lock doesn't get released immediately.
@@ -479,18 +535,28 @@ impl<'a> Drop for PersistenceNotifierGuard<'a> {
        }
 }
 
-/// The amount of time we require our counterparty wait to claim their money (ie time between when
-/// we, or our watchtower, must check for them having broadcast a theft transaction).
-pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
-/// The amount of time we're willing to wait to claim money back to us
-pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+/// The amount of time in blocks we require our counterparty to wait to claim their money (ie time
+/// between when we, or our watchtower, must check for them having broadcast a theft transaction).
+///
+/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`]
+///
+/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
+pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
+/// the maximum required amount in lnd as of March 2021.
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
 
 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
-/// ie the node we forwarded the payment on to should always have enough room to reliably time out
-/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
-/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
+/// HTLC's CLTV. The current default represents roughly six hours of blocks at six blocks/hour.
+///
+/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
+///
+/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
+// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
+// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
+// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
+// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 6;
 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 
 // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
@@ -501,13 +567,13 @@ pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
 // LATENCY_GRACE_PERIOD_BLOCKS.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
 
 // Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
 // ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
 #[deny(const_err)]
 #[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
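
As an aside on the mechanism: these constants are compile-time assertions. A minimal standalone sketch of the trick, using illustrative numbers rather than the crate's real buffer values: a `u32` subtraction that underflows during constant evaluation is rejected under `#[deny(const_err)]`, so the build breaks if the delta is ever set too low.

```rust
const DELTA: u32 = 36;           // stand-in for MIN_CLTV_EXPIRY_DELTA
const REQUIRED_BUFFER: u32 = 30; // stand-in for the summed grace/claim/reorg buffers

// If DELTA drops below REQUIRED_BUFFER this underflows in const evaluation
// and compilation fails, acting as a static assertion.
#[deny(const_err)]
#[allow(dead_code)]
const ASSERT_DELTA_COVERS_BUFFERS: u32 = DELTA - REQUIRED_BUFFER;
```
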
 
 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
 #[derive(Clone)]
@@ -544,6 +610,10 @@ pub struct ChannelDetails {
        /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
        /// the peer is connected, and (c) no monitor update failure is pending resolution.
        pub is_live: bool,
+
+       /// Information on the fees and requirements that the counterparty requires when forwarding
+       /// payments to us through this channel.
+       pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
 }
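
Reading the new field off `list_channels` is straightforward. A sketch assuming a fully constructed `channel_manager`; the `CounterpartyForwardingInfo` field names below follow its routing-fee role and should be checked against `ln::channel`:

```rust
for chan in channel_manager.list_channels() {
    if let Some(fwd) = chan.counterparty_forwarding_info {
        println!(
            "peer forwards to us for {} msat + {} ppm, cltv delta {}",
            fwd.fee_base_msat, fwd.fee_proportional_millionths, fwd.cltv_expiry_delta
        );
    }
}
```
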
 
 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
@@ -760,24 +830,21 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        ///
        /// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
        ///
-       /// Users must provide the current blockchain height from which to track onchain channel
-       /// funding outpoints and send payments with reliable timelocks.
-       ///
        /// Users need to notify the new ChannelManager when a new block is connected or
-       /// disconnected using its `block_connected` and `block_disconnected` methods.
-       pub fn new(network: Network, fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self {
-               let secp_ctx = Secp256k1::new();
+       /// disconnected using its `block_connected` and `block_disconnected` methods, starting
+       /// from after `params.best_block`.
+       pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self {
+               let mut secp_ctx = Secp256k1::new();
+               secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
 
                ChannelManager {
                        default_configuration: config.clone(),
-                       genesis_hash: genesis_block(network).header.block_hash(),
+                       genesis_hash: genesis_block(params.network).header.block_hash(),
                        fee_estimator: fee_est,
                        chain_monitor,
                        tx_broadcaster,
 
-                       latest_block_height: AtomicUsize::new(current_blockchain_height),
-                       last_block_hash: Mutex::new(Default::default()),
-                       secp_ctx,
+                       best_block: RwLock::new(params.best_block),
 
                        channel_state: Mutex::new(ChannelHolder{
                                by_id: HashMap::new(),
@@ -787,12 +854,15 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                pending_msg_events: Vec::new(),
                        }),
                        our_network_key: keys_manager.get_node_secret(),
+                       our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
+                       secp_ctx,
 
                        last_node_announcement_serial: AtomicUsize::new(0),
 
                        per_peer_state: RwLock::new(HashMap::new()),
 
                        pending_events: Mutex::new(Vec::new()),
+                       pending_background_events: Mutex::new(Vec::new()),
                        total_consistency_lock: RwLock::new(()),
                        persistence_notifier: PersistenceNotifier::new(),
 
@@ -802,12 +872,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
+       /// Gets the current configuration applied to all new channels.
+       pub fn get_current_default_configuration(&self) -> &UserConfig {
+               &self.default_configuration
+       }
+
        /// Creates a new outbound channel to the given remote node and with the given value.
        ///
-       /// user_id will be provided back as user_channel_id in FundingGenerationReady and
-       /// FundingBroadcastSafe events to allow tracking of which events correspond with which
-       /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
-       /// may wish to avoid using 0 for user_id here.
+       /// user_id will be provided back as user_channel_id in FundingGenerationReady events to allow
+       /// tracking of which events correspond with which create_channel call. Note that the
+       /// user_channel_id defaults to 0 for inbound channels, so you may wish to avoid using 0 for
+       /// user_id here. user_id has no meaning inside of LDK, it is simply copied to events and
+       /// otherwise ignored.
        ///
        /// If successful, will generate a SendOpenChannel message event, so you should probably poll
        /// PeerManager::process_events afterwards.
@@ -862,6 +938,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        outbound_capacity_msat,
                                        user_id: channel.get_user_id(),
                                        is_live: channel.is_live(),
+                                       counterparty_forwarding_info: channel.counterparty_forwarding_info(),
                                });
                        }
                }
@@ -941,12 +1018,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        #[inline]
        fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
-               let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+               let (monitor_update_option, mut failed_htlcs) = shutdown_res;
                log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
                for htlc_source in failed_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
                }
-               if let Some(funding_txo) = funding_txo_option {
+               if let Some((funding_txo, monitor_update)) = monitor_update_option {
                        // There isn't anything we can do if we get an update failure - we're already
                        // force-closing. The monitor update on the required in-memory copy should broadcast
                        // the latest local state, which is the best we can do anyway. Thus, it is safe to
@@ -955,16 +1032,14 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
+       fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
                                if let Some(node_id) = peer_node_id {
                                        if chan.get().get_counterparty_node_id() != *node_id {
-                                               // Error or Ok here doesn't matter - the result is only exposed publicly
-                                               // when peer_node_id is None anyway.
-                                               return Ok(());
+                                               return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
                                        }
                                }
                                if let Some(short_id) = chan.get().get_short_channel_id() {
@@ -984,14 +1059,27 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        });
                }
 
-               Ok(())
+               Ok(chan.get_counterparty_node_id())
        }
 
        /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
        /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
        pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-               self.force_close_channel_with_peer(channel_id, None)
+               match self.force_close_channel_with_peer(channel_id, None) {
+                       Ok(counterparty_node_id) => {
+                               self.channel_state.lock().unwrap().pending_msg_events.push(
+                                       events::MessageSendEvent::HandleError {
+                                               node_id: counterparty_node_id,
+                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                       msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+                                               },
+                                       }
+                               );
+                               Ok(())
+                       },
+                       Err(e) => Err(e)
+               }
        }
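
Since force-closing now queues an error message for the peer, callers should flush message events afterwards. A sketch assuming running `channel_manager` and `peer_manager` instances and a known `channel_id`:

```rust
// Broadcasts our latest local commitment transaction for the channel and
// queues an ErrorMessage for the counterparty.
channel_manager.force_close_channel(&channel_id).expect("unknown channel_id");
// The queued message only reaches the peer once message events are
// processed, e.g. by a running PeerManager.
peer_manager.process_events();
```
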
 
        /// Force close all channels, immediately broadcasting the latest local commitment transaction
@@ -1109,7 +1197,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                // HTLC_FAIL_BACK_BUFFER blocks to go.
                                // Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
                                // before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
-                               if (msg.cltv_expiry as u64) <= self.latest_block_height.load(Ordering::Acquire) as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+                               if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
                                        return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
                                }
                                // final_incorrect_htlc_amount
@@ -1229,10 +1317,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
                                                break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
                                        }
-                                       if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+                                       if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
                                                break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
                                        }
-                                       let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+                                       let cur_height = self.best_block.read().unwrap().height() + 1;
                                        // Theoretically, channel counterparty shouldn't send us a HTLC expiring now, but we want to be robust wrt to counterparty
                        // packet sanitization (see HTLC_FAIL_BACK_BUFFER rationale)
                                        if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
@@ -1287,7 +1375,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        short_channel_id,
                        timestamp: chan.get_update_time_counter(),
                        flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
-                       cltv_expiry_delta: CLTV_EXPIRY_DELTA,
+                       cltv_expiry_delta: chan.get_cltv_expiry_delta(),
                        htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
                        htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
                        fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
@@ -1449,7 +1537,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        return Err(PaymentSendFailure::PathParameterError(path_errs));
                }
 
-               let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+               let cur_height = self.best_block.read().unwrap().height() + 1;
                let mut results = Vec::new();
                for path in route.paths.iter() {
                        results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
@@ -1476,34 +1564,30 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       /// Call this upon creation of a funding transaction for the given channel.
-       ///
-       /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
-       /// or your counterparty can steal your funds!
-       ///
-       /// Panics if a funding transaction has already been provided for this channel.
-       ///
-       /// May panic if the funding_txo is duplicative with some other channel (note that this should
-       /// be trivially prevented by using unique funding transaction keys per-channel).
-       pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
-               let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-
+       /// Handles the generation of a funding transaction, optionally (for tests) with a function
+       /// which checks the correctness of the funding transaction given the associated channel.
+       fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>
+                       (&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, find_funding_output: FundingOutput) -> Result<(), APIError> {
                let (chan, msg) = {
                        let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
                                Some(mut chan) => {
-                                       (chan.get_outbound_funding_created(funding_txo, &self.logger)
+                                       let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+                                       (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
                                                .map_err(|e| if let ChannelError::Close(msg) = e {
                                                        MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
                                                } else { unreachable!(); })
                                        , chan)
                                },
-                               None => return
+                               None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
                        };
                        match handle_error!(self, res, chan.get_counterparty_node_id()) {
                                Ok(funding_msg) => {
                                        (chan, funding_msg)
                                },
-                               Err(_) => { return; }
+                               Err(_) => { return Err(APIError::ChannelUnavailable {
+                                       err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+                               }) },
                        }
                };
 
@@ -1520,6 +1604,69 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                e.insert(chan);
                        }
                }
+               Ok(())
+       }
+
+       #[cfg(test)]
+       pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
+               self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |_, tx| {
+                       Ok(OutPoint { txid: tx.txid(), index: output_index })
+               })
+       }
+
+       /// Call this upon creation of a funding transaction for the given channel.
+       ///
+       /// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs
+       /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
+       ///
+       /// Panics if a funding transaction has already been provided for this channel.
+       ///
+       /// May panic if the output found in the funding transaction is duplicative with some other
+       /// channel (note that this should be trivially prevented by using unique funding transaction
+       /// keys per-channel).
+       ///
+       /// Do NOT broadcast the funding transaction yourself. When we have safely received our
+       /// counterparty's signature the funding transaction will automatically be broadcast via the
+       /// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
+       ///
+       /// Note that this includes RBF or similar transaction replacement strategies - lightning does
+       /// not currently support replacing a funding transaction on an existing channel. Instead,
+       /// create a new channel with a conflicting funding transaction.
+       pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
+               let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+               for inp in funding_transaction.input.iter() {
+                       if inp.witness.is_empty() {
+                               return Err(APIError::APIMisuseError {
+                                       err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
+                               });
+                       }
+               }
+               self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |chan, tx| {
+                       let mut output_index = None;
+                       let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
+                       for (idx, outp) in tx.output.iter().enumerate() {
+                               if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+                                       if output_index.is_some() {
+                                               return Err(APIError::APIMisuseError {
+                                                       err: "Multiple outputs matched the expected script and value".to_owned()
+                                               });
+                                       }
+                                       if idx > u16::max_value() as usize {
+                                               return Err(APIError::APIMisuseError {
+                                                       err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+                                               });
+                                       }
+                                       output_index = Some(idx as u16);
+                               }
+                       }
+                       if output_index.is_none() {
+                               return Err(APIError::APIMisuseError {
+                                       err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
+                               });
+                       }
+                       Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() })
+               })
        }
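
A sketch of the intended funding flow, tying this method to `Event::FundingGenerationReady`. `wallet_build_funding_tx` is a hypothetical wallet routine; the event field names match the `FundingGenerationReady` variant:

```rust
use lightning::util::events::Event;

// Inside your event-polling loop: build and fully sign (but do NOT
// broadcast) a transaction paying `channel_value_satoshis` to
// `output_script`, then hand it back to the ChannelManager.
if let Event::FundingGenerationReady {
    temporary_channel_id, channel_value_satoshis, output_script, ..
} = event {
    // Hypothetical wallet call producing a signed, SegWit-spending tx with
    // one output of exactly `channel_value_satoshis` to `output_script`.
    let funding_tx = wallet_build_funding_tx(&output_script, channel_value_satoshis);
    channel_manager
        .funding_transaction_generated(&temporary_channel_id, funding_tx)
        .expect("tx should contain the output described by the event");
}
```
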
 
        fn get_announcement_sigs(&self, chan: &Channel<Signer>) -> Option<msgs::AnnouncementSignatures> {
@@ -1799,10 +1946,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                                for htlc in htlcs.iter() {
                                                                                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                                                                        htlc_msat_height_data.extend_from_slice(
-                                                                                               &byte_utils::be32_to_array(
-                                                                                                       self.latest_block_height.load(Ordering::Acquire)
-                                                                                                               as u32,
-                                                                                               ),
+                                                                                               &byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
                                                                                        );
                                                                                        failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
                                                                                                        short_channel_id: htlc.prev_hop.short_channel_id,
@@ -1853,13 +1997,42 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                events.append(&mut new_events);
        }
 
+       /// Free the background events, generally called from timer_tick_occurred.
+       ///
+       /// Exposed for testing to allow us to process events quickly without generating accidental
+       /// BroadcastChannelUpdate events in timer_tick_occurred.
+       ///
+       /// Expects the caller to have a total_consistency_lock read lock.
+       fn process_background_events(&self) {
+               let mut background_events = Vec::new();
+               mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
+               for event in background_events.drain(..) {
+                       match event {
+                               BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+                                       // The channel has already been closed, so there is no need to worry about
+                                       // the monitor update completing.
+                                       let _ = self.chain_monitor.update_channel(funding_txo, update);
+                               },
+                       }
+               }
+       }
+
+       #[cfg(any(test, feature = "_test_utils"))]
+       pub(crate) fn test_process_background_events(&self) {
+               self.process_background_events();
+       }
+
        /// If a peer is disconnected we mark any channels with that peer as 'disabled'.
        /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
        /// to inform the network about the uselessness of these channels.
        ///
        /// This method handles all the details, and must be called roughly once per minute.
-       pub fn timer_chan_freshness_every_min(&self) {
+       ///
+       /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
+       pub fn timer_tick_occurred(&self) {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+               self.process_background_events();
+
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
                for (_, chan) in channel_state.by_id.iter_mut() {
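
A minimal sketch of the once-per-minute driver for `timer_tick_occurred`; the threading model is up to the embedder, and `channel_manager` is assumed to be an `Arc` clone of a running manager:

```rust
use std::thread;
use std::time::Duration;

thread::spawn(move || loop {
    thread::sleep(Duration::from_secs(60));
    // Flushes queued BackgroundEvents and broadcasts ChannelUpdates for
    // channels that have been disabled for a full minute.
    channel_manager.timer_tick_occurred();
});
```
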
@@ -1893,8 +2066,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
                                let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
-                                       self.latest_block_height.load(Ordering::Acquire) as u32,
-                               ));
+                                               self.best_block.read().unwrap().height()));
                                self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
                                                HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
                                                HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
@@ -1952,6 +2124,10 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                //identify whether we sent it or not based on the (I presume) very different runtime
                //between the branches here. We should make this async and move it into the forward HTLCs
                //timer handling.
+
+               // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+               // from block_connected which may run during initialization prior to the chain_monitor
+               // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
                match source {
                        HTLCSource::OutboundRoute { ref path, .. } => {
                                log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
@@ -2104,8 +2280,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                if (is_mpp && !valid_mpp) || (!is_mpp && (htlc.value < expected_amount || htlc.value > expected_amount * 2)) {
                                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
                                        htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
-                                               self.latest_block_height.load(Ordering::Acquire) as u32,
-                                       ));
+                                                       self.best_block.read().unwrap().height()));
                                        self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
                                                                         HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
                                                                         HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
@@ -2235,7 +2410,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        /// Gets the node_id held by this ChannelManager
        pub fn get_our_node_id(&self) -> PublicKey {
-               PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
+               self.our_network_pubkey.clone()
        }
 
        /// Restores a single, given channel to normal operation after a
@@ -2279,7 +2454,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                return;
                        }
 
-                       let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(&self.logger);
+                       let (raa, commitment_update, order, pending_forwards, mut pending_failures, funding_broadcastable, funding_locked) = channel.monitor_updating_restored(&self.logger);
                        if !pending_forwards.is_empty() {
                                htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
                        }
@@ -2311,11 +2486,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        handle_cs!();
                                },
                        }
-                       if needs_broadcast_safe {
-                               pending_events.push(events::Event::FundingBroadcastSafe {
-                                       funding_txo: channel.get_funding_txo().unwrap(),
-                                       user_channel_id: channel.get_user_id(),
-                               });
+                       if let Some(tx) = funding_broadcastable {
+                               self.tx_broadcaster.broadcast_transaction(&tx);
                        }
                        if let Some(msg) = funding_locked {
                                pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
@@ -2393,6 +2565,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 
        fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
                let ((funding_msg, monitor), mut chan) = {
+                       let best_block = *self.best_block.read().unwrap();
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
@@ -2400,7 +2573,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
                                        }
-                                       (try_chan_entry!(self, chan.get_mut().funding_created(msg, &self.logger), channel_state, chan), chan.remove())
+                                       (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove())
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
                        }
@@ -2417,7 +2590,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        // We do not do a force-close here as that would generate a monitor update for
                                        // a monitor that we didn't manage to store (and that we don't care about - we
                                        // don't respond with the funding_signed so the channel can never go on chain).
-                                       let (_funding_txo_option, _monitor_update, failed_htlcs) = chan.force_shutdown(true);
+                                       let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
                                        assert!(failed_htlcs.is_empty());
                                        return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
                                },
@@ -2448,7 +2621,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
        }
 
        fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-               let (funding_txo, user_id) = {
+               let funding_tx = {
+                       let best_block = *self.best_block.read().unwrap();
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                        match channel_state.by_id.entry(msg.channel_id) {
@@ -2456,23 +2630,19 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let monitor = match chan.get_mut().funding_signed(&msg, &self.logger) {
+                                       let (monitor, funding_tx) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
                                                return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
                                        }
-                                       (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
+                                       funding_tx
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               let mut pending_events = self.pending_events.lock().unwrap();
-               pending_events.push(events::Event::FundingBroadcastSafe {
-                       funding_txo,
-                       user_channel_id: user_id,
-               });
+               self.tx_broadcaster.broadcast_transaction(&funding_tx);
                Ok(())
        }
 
@@ -2507,7 +2677,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                }
        }
 
-       fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
+       fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
                let (mut dropped_htlcs, chan_option) = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
@@ -2517,7 +2687,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                        if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &msg), channel_state, chan_entry);
+                                       let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry);
                                        if let Some(msg) = shutdown {
                                                channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
                                                        node_id: counterparty_node_id.clone(),
@@ -2929,6 +3099,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                Ok(())
        }
 
+       fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+               let mut channel_state_lock = self.channel_state.lock().unwrap();
+               let channel_state = &mut *channel_state_lock;
+               let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
+                       Some(chan_id) => chan_id.clone(),
+                       None => {
+                               // It's not a local channel
+                               return Ok(())
+                       }
+               };
+               match channel_state.by_id.entry(chan_id) {
+                       hash_map::Entry::Occupied(mut chan) => {
+                               if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+                                       // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
+                                       return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+                               }
+                               try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+                       },
+                       hash_map::Entry::Vacant(_) => unreachable!()
+               }
+               Ok(())
+       }
+
        fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
                let mut channel_state_lock = self.channel_state.lock().unwrap();
                let channel_state = &mut *channel_state_lock;
@@ -3089,6 +3282,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                                        msg: update
                                                                });
                                                        }
+                                                       pending_msg_events.push(events::MessageSendEvent::HandleError {
+                                                               node_id: chan.get_counterparty_node_id(),
+                                                               action: msgs::ErrorAction::SendErrorMessage {
+                                                                       msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+                                                               },
+                                                       });
                                                }
                                        },
                                }
@@ -3099,6 +3298,29 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        self.finish_force_close_channel(failure);
                }
        }
+
+       /// Handle a list of channel failures during a block_connected or block_disconnected call,
+       /// pushing the channel monitor update (if any) to the background events queue and removing the
+       /// Channel object.
+       fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
+                       // Either a commitment transaction has been confirmed on-chain or
+                       // Either a commitment transactions has been confirmed on-chain or
+                       // Channel::block_disconnected detected that the funding transaction has been
+                       // reorganized out of the main chain.
+                       // We cannot broadcast our latest local state via monitor update (as
+                       // Channel::force_shutdown tries to make us do) as we may still be in initialization,
+                       // so we track the update internally and handle it when the user next calls
+                       // timer_tick_occurred, guaranteeing we're running normally.
+                       if let Some((funding_txo, update)) = failure.0.take() {
+                               assert_eq!(update.updates.len(), 1);
+                               if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
+                                       assert!(should_broadcast);
+                               } else { unreachable!(); }
+                               self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+                       }
+                       self.finish_force_close_channel(failure);
+               }
+       }
 }
 
 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
@@ -3139,18 +3361,132 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvi
        }
 }
 
-impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
-       where M::Target: chain::Watch<Signer>,
-        T::Target: BroadcasterInterface,
-        K::Target: KeysInterface<Signer = Signer>,
-        F::Target: FeeEstimator,
-        L::Target: Logger,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Listen for ChannelManager<Signer, M, T, K, F, L>
+where
+       M::Target: chain::Watch<Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface<Signer = Signer>,
+       F::Target: FeeEstimator,
+       L::Target: Logger,
 {
-       /// Updates channel state based on transactions seen in a connected block.
-       pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
-               let header_hash = header.block_hash();
-               log_trace!(self.logger, "Block {} at height {} connected", header_hash, height);
+       fn block_connected(&self, block: &Block, height: u32) {
+               {
+                       let best_block = self.best_block.read().unwrap();
+                       assert_eq!(best_block.block_hash(), block.header.prev_blockhash,
+                               "Blocks must be connected in chain-order - the connected header must build on the last connected header");
+                       assert_eq!(best_block.height(), height - 1,
+                               "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
+               }
+
+               let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+               self.transactions_confirmed(&block.header, &txdata, height);
+               self.best_block_updated(&block.header, height);
+       }
+
+       fn block_disconnected(&self, header: &BlockHeader, height: u32) {
+               let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+               let new_height = height - 1;
+               {
+                       let mut best_block = self.best_block.write().unwrap();
+                       assert_eq!(best_block.block_hash(), header.block_hash(),
+                               "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
+                       assert_eq!(best_block.height(), height,
+                               "Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
+                       *best_block = BestBlock::new(header.prev_blockhash, new_height)
+               }
+
+               self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time));
+       }
+}
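+
+// A minimal usage sketch (illustrative, not part of this change): with blocks arriving
+// strictly in chain order, a caller can drive `chain::Listen` just as the bench module at
+// the bottom of this file does; `manager`, `block` and `height` are hypothetical bindings:
+//
+//     use chain::Listen;
+//     Listen::block_connected(&manager, &block, height);
+//     // ...and, when a reorg unwinds the tip, in reverse order:
+//     Listen::block_disconnected(&manager, &block.header, height);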
+
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<Signer, M, T, K, F, L>
+where
+       M::Target: chain::Watch<Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface<Signer = Signer>,
+       F::Target: FeeEstimator,
+       L::Target: Logger,
+{
+       fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+               // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+               // during initialization prior to the chain_monitor being fully configured in some cases.
+               // See the docs for `ChannelManagerReadArgs` for more.
+
+               let block_hash = header.block_hash();
+               log_trace!(self.logger, "Processing {} transactions in block {} at height {}", txdata.len(), block_hash, height);
+
+               let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+               self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
+       }
+
+       fn best_block_updated(&self, header: &BlockHeader, height: u32) {
+               // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+               // during initialization prior to the chain_monitor being fully configured in some cases.
+               // See the docs for `ChannelManagerReadArgs` for more.
+
+               let block_hash = header.block_hash();
+               log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
+
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+               *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
+
+               self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time));
+
+               loop {
+                       // Update last_node_announcement_serial to be the max of its current value and the
+                       // block timestamp. This should keep us close to the current time without relying on
+                       // having an explicit local time source.
+                       // Just in case we end up in a race, we loop until we either successfully update
+                       // last_node_announcement_serial or decide we don't need to.
+                       let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire);
+                       if old_serial >= header.time as usize { break; }
+                       if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
+                               break;
+                       }
+               }
+       }
+
+       fn get_relevant_txids(&self) -> Vec<Txid> {
+               let channel_state = self.channel_state.lock().unwrap();
+               let mut res = Vec::with_capacity(channel_state.short_to_id.len());
+               for chan in channel_state.by_id.values() {
+                       if let Some(funding_txo) = chan.get_funding_txo() {
+                               res.push(funding_txo.txid);
+                       }
+               }
+               res
+       }
+
+       fn transaction_unconfirmed(&self, txid: &Txid) {
+               let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+               self.do_chain_event(None, |channel| {
+                       if let Some(funding_txo) = channel.get_funding_txo() {
+                               if funding_txo.txid == *txid {
+                                       channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new()))
+                               } else { Ok((None, Vec::new())) }
+                       } else { Ok((None, Vec::new())) }
+               });
+       }
+}
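+
+// A rough sketch of driving `chain::Confirm` from a transaction-filtering chain source such
+// as an Electrum-style client; `manager`, `chain_source` and `relevant_txdata` are
+// hypothetical bindings, not APIs defined here:
+//
+//     use chain::Confirm;
+//     for txid in manager.get_relevant_txids() {
+//             if chain_source.tx_was_reorged_out(&txid) {
+//                     manager.transaction_unconfirmed(&txid);
+//             }
+//     }
+//     manager.transactions_confirmed(&header, &relevant_txdata, height);
+//     manager.best_block_updated(&header, height);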
+
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
+where
+       M::Target: chain::Watch<Signer>,
+       T::Target: BroadcasterInterface,
+       K::Target: KeysInterface<Signer = Signer>,
+       F::Target: FeeEstimator,
+       L::Target: Logger,
+{
+       /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
+       /// un/confirmed, etc) on each channel, processing any errors or messages the function
+       /// generates as a result.
+       fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
+                       (&self, height_opt: Option<u32>, f: FN) {
+               // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+               // during initialization prior to the chain_monitor being fully configured in some cases.
+               // See the docs for `ChannelManagerReadArgs` for more.
+
                let mut failed_channels = Vec::new();
                let mut timed_out_htlcs = Vec::new();
                {
@@ -3159,7 +3495,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                        let short_to_id = &mut channel_state.short_to_id;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
-                               let res = channel.block_connected(header, txdata, height);
+                               let res = f(channel);
                                if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
@@ -3185,127 +3521,69 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
                                                short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
                                        }
                                } else if let Err(e) = res {
+                                       if let Some(short_id) = channel.get_short_channel_id() {
+                                               short_to_id.remove(&short_id);
+                                       }
+                                       // It looks like our counterparty went on-chain or the funding transaction was
+                                       // reorged out of the main chain. Close the channel.
+                                       failed_channels.push(channel.force_shutdown(true));
+                                       if let Ok(update) = self.get_channel_update(&channel) {
+                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+                                                       msg: update
+                                               });
+                                       }
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage { msg: e },
                                        });
                                        return false;
                                }
-                               if let Some(funding_txo) = channel.get_funding_txo() {
-                                       for &(_, tx) in txdata.iter() {
-                                               for inp in tx.input.iter() {
-                                                       if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
-                                                               log_trace!(self.logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
-                                                               if let Some(short_id) = channel.get_short_channel_id() {
-                                                                       short_to_id.remove(&short_id);
-                                                               }
-                                                               // It looks like our counterparty went on-chain. We go ahead and
-                                                               // broadcast our latest local state as well here, just in case its
-                                                               // some kind of SPV attack, though we expect these to be dropped.
-                                                               failed_channels.push(channel.force_shutdown(true));
-                                                               if let Ok(update) = self.get_channel_update(&channel) {
-                                                                       pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                                               msg: update
-                                                                       });
-                                                               }
-                                                               return false;
-                                                       }
-                                               }
-                                       }
-                               }
                                true
                        });
 
-                       channel_state.claimable_htlcs.retain(|&(ref payment_hash, _), htlcs| {
-                               htlcs.retain(|htlc| {
-                                       // If height is approaching the number of blocks we think it takes us to get
-                                       // our commitment transaction confirmed before the HTLC expires, plus the
-                                       // number of blocks we generally consider it to take to do a commitment update,
-                                       // just give up on it and fail the HTLC.
-                                       if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
-                                               let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-                                               htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-                                               timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
-                                                       failure_code: 0x4000 | 15,
-                                                       data: htlc_msat_height_data
-                                               }));
-                                               false
-                                       } else { true }
+                       if let Some(height) = height_opt {
+                               channel_state.claimable_htlcs.retain(|&(ref payment_hash, _), htlcs| {
+                                       htlcs.retain(|htlc| {
+                                               // If height is approaching the number of blocks we think it takes us to get
+                                               // our commitment transaction confirmed before the HTLC expires, plus the
+                                               // number of blocks we generally consider it to take to do a commitment update,
+                                               // just give up on it and fail the HTLC.
+                                               if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+                                                       let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+                                                       htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+                                                       timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+                                                               failure_code: 0x4000 | 15,
+                                                               data: htlc_msat_height_data
+                                                       }));
+                                                       false
+                                               } else { true }
+                                       });
+                                       !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
                                });
-                               !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
-                       });
-               }
-               for failure in failed_channels.drain(..) {
-                       self.finish_force_close_channel(failure);
+                       }
                }
 
+               self.handle_init_event_channel_failures(failed_channels);
+
                for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
                }
-               self.latest_block_height.store(height as usize, Ordering::Release);
-               *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
-               loop {
-                       // Update last_node_announcement_serial to be the max of its current value and the
-                       // block timestamp. This should keep us close to the current time without relying on
-                       // having an explicit local time source.
-                       // Just in case we end up in a race, we loop until we either successfully update
-                       // last_node_announcement_serial or decide we don't need to.
-                       let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire);
-                       if old_serial >= header.time as usize { break; }
-                       if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
-                               break;
-                       }
-               }
-       }
-
-       /// Updates channel state based on a disconnected block.
-       ///
-       /// If necessary, the channel may be force-closed without letting the counterparty participate
-       /// in the shutdown.
-       pub fn block_disconnected(&self, header: &BlockHeader) {
-               let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-               let mut failed_channels = Vec::new();
-               {
-                       let mut channel_lock = self.channel_state.lock().unwrap();
-                       let channel_state = &mut *channel_lock;
-                       let short_to_id = &mut channel_state.short_to_id;
-                       let pending_msg_events = &mut channel_state.pending_msg_events;
-                       channel_state.by_id.retain(|_,  v| {
-                               if v.block_disconnected(header) {
-                                       if let Some(short_id) = v.get_short_channel_id() {
-                                               short_to_id.remove(&short_id);
-                                       }
-                                       failed_channels.push(v.force_shutdown(true));
-                                       if let Ok(update) = self.get_channel_update(&v) {
-                                               pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-                                                       msg: update
-                                               });
-                                       }
-                                       false
-                               } else {
-                                       true
-                               }
-                       });
-               }
-               for failure in failed_channels.drain(..) {
-                       self.finish_force_close_channel(failure);
-               }
-               self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
-               *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.block_hash();
        }
 
        /// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
-       /// indicating whether persistence is necessary. Only one listener on `wait_timeout` is
-       /// guaranteed to be woken up.
+       /// indicating whether persistence is necessary. Only one listener on
+       /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+       /// up.
        /// Note that the feature `allow_wallclock_use` must be enabled to use this function.
        #[cfg(any(test, feature = "allow_wallclock_use"))]
-       pub fn wait_timeout(&self, max_wait: Duration) -> bool {
+       pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
                self.persistence_notifier.wait_timeout(max_wait)
        }
 
-       /// Blocks until ChannelManager needs to be persisted. Only one listener on `wait` is
-       /// guaranteed to be woken up.
-       pub fn wait(&self) {
+       /// Blocks until ChannelManager needs to be persisted. Only one listener on
+       /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+       /// up.
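+       ///
+       /// A dedicated persister thread might loop on this; as a minimal sketch, where
+       /// `persist_manager` is a user-provided serialization routine:
+       /// `loop { manager.await_persistable_update(); persist_manager(&manager); }`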
+       pub fn await_persistable_update(&self) {
                self.persistence_notifier.wait()
        }
 
@@ -3351,9 +3629,9 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sy
                let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
        }
 
-       fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
+       fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-               let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
+               let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, their_features, msg), *counterparty_node_id);
        }
 
        fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
@@ -3401,6 +3679,11 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sy
                let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
        }
 
+       fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
+               let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+               let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+       }
+
        fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
                let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
@@ -3480,6 +3763,7 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sy
                                        &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
                                        &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
                                        &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+                                       &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
                                }
                        });
                }
@@ -3557,7 +3841,7 @@ impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sy
 }
 
 /// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
-/// disk/backups, through `wait_timeout` and `wait`.
+/// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`.
 struct PersistenceNotifier {
        /// Users won't access the persistence_lock directly, but rather wait on its bool using
        /// `wait_timeout` and `wait`.
@@ -3846,8 +4130,11 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
 
                self.genesis_hash.write(writer)?;
-               (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
-               self.last_block_hash.lock().unwrap().write(writer)?;
+               {
+                       let best_block = self.best_block.read().unwrap();
+                       best_block.height().write(writer)?;
+                       best_block.block_hash().write(writer)?;
+               }
 
                let channel_state = self.channel_state.lock().unwrap();
                let mut unfunded_channels = 0;
@@ -3895,6 +4182,18 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
                        event.write(writer)?;
                }
 
+               let background_events = self.pending_background_events.lock().unwrap();
+               (background_events.len() as u64).write(writer)?;
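+               // Each event is written as a one-byte type tag followed by its payload; tag 0 is
+               // ClosingMonitorUpdate. The corresponding read path must be kept in sync.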
+               for event in background_events.iter() {
+                       match event {
+                               BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
+                                       0u8.write(writer)?;
+                                       funding_txo.write(writer)?;
+                                       monitor_update.write(writer)?;
+                               },
+                       }
+               }
+
                (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
 
                Ok(())
@@ -3906,15 +4205,26 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
 /// is:
 /// 1) Deserialize all stored ChannelMonitors.
-/// 2) Deserialize the ChannelManager by filling in this struct and calling <(Sha256dHash,
-///    ChannelManager)>::read(reader, args).
+/// 2) Deserialize the ChannelManager by filling in this struct and calling:
+///    <(BlockHash, ChannelManager)>::read(reader, args)
 ///    This may result in closing some Channels if the ChannelMonitor is newer than the stored
 ///    ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
-///    ChannelMonitor::get_outputs_to_watch() and ChannelMonitor::get_funding_txo().
+/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
+///    way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
+///    ChannelMonitor::get_funding_txo().
 /// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Move the ChannelMonitors into your local chain::Watch.
-/// 6) Disconnect/connect blocks on the ChannelManager.
+/// 5) Disconnect/connect blocks on the ChannelManager.
+/// 6) Move the ChannelMonitors into your local chain::Watch.
+///
+/// Note that steps #4-6 may be performed in any order; however, all three must complete before
+/// you call any other methods on the newly-deserialized ChannelManager.
+///
+/// Note that because some channels may be closed during deserialization, it is critical that you
+/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
+/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
+/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
+/// not force-close the same channels but consider them live), you may end up revoking a state for
+/// which you've already broadcasted the transaction.
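+///
+/// As a rough sketch of steps #2 and #6 (the `reader`, `args`, `monitors` and `chain_monitor`
+/// bindings are illustrative, not APIs defined here):
+///
+///    let (best_block_hash, manager) = <(BlockHash, ChannelManager)>::read(reader, args)?;
+///    // ...reconnect blocks and register outpoints per steps #3-5, then:
+///    for (funding_txo, monitor) in monitors { chain_monitor.watch_channel(funding_txo, monitor)?; }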
 pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
        where M::Target: chain::Watch<Signer>,
         T::Target: BroadcasterInterface,
@@ -4016,8 +4326,8 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                }
 
                let genesis_hash: BlockHash = Readable::read(reader)?;
-               let latest_block_height: u32 = Readable::read(reader)?;
-               let last_block_hash: BlockHash = Readable::read(reader)?;
+               let best_block_height: u32 = Readable::read(reader)?;
+               let best_block_hash: BlockHash = Readable::read(reader)?;
 
                let mut failed_htlcs = Vec::new();
 
@@ -4027,10 +4337,6 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
                for _ in 0..channel_count {
                        let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
-                       if channel.last_block_connected != Default::default() && channel.last_block_connected != last_block_hash {
-                               return Err(DecodeError::InvalidValue);
-                       }
-
                        let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
                        funding_txo_set.insert(funding_txo.clone());
                        if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
@@ -4045,7 +4351,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                                channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
                                                channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
                                        // But if the channel is behind of the monitor, close the channel:
-                                       let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+                                       let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
                                        failed_htlcs.append(&mut new_failed_htlcs);
                                        monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
                                } else {
@@ -4109,17 +4415,27 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                        }
                }
 
+               let background_event_count: u64 = Readable::read(reader)?;
+               let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
+               for _ in 0..background_event_count {
+                       match <u8 as Readable>::read(reader)? {
+                               0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+                               _ => return Err(DecodeError::InvalidValue),
+                       }
+               }
+
                let last_node_announcement_serial: u32 = Readable::read(reader)?;
 
+               let mut secp_ctx = Secp256k1::new();
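+               // Randomizing the context with fresh entropy from the KeysInterface hardens ECDSA
+               // signing against side-channel attacks: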
+               secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
+
                let channel_manager = ChannelManager {
                        genesis_hash,
                        fee_estimator: args.fee_estimator,
                        chain_monitor: args.chain_monitor,
                        tx_broadcaster: args.tx_broadcaster,
 
-                       latest_block_height: AtomicUsize::new(latest_block_height as usize),
-                       last_block_hash: Mutex::new(last_block_hash),
-                       secp_ctx: Secp256k1::new(),
+                       best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
 
                        channel_state: Mutex::new(ChannelHolder {
                                by_id,
@@ -4129,12 +4445,15 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                                pending_msg_events: Vec::new(),
                        }),
                        our_network_key: args.keys_manager.get_node_secret(),
+                       our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
+                       secp_ctx,
 
                        last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
 
                        per_peer_state: RwLock::new(per_peer_state),
 
                        pending_events: Mutex::new(pending_events_read),
+                       pending_background_events: Mutex::new(pending_background_events_read),
                        total_consistency_lock: RwLock::new(()),
                        persistence_notifier: PersistenceNotifier::new(),
 
@@ -4150,7 +4469,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
                //TODO: Broadcast channel update for closed channels, but only after we've made a
                //connection or two.
 
-               Ok((last_block_hash.clone(), channel_manager))
+               Ok((best_block_hash.clone(), channel_manager))
        }
 }
 
@@ -4204,3 +4523,150 @@ mod tests {
                }
        }
 }
+
+#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
+pub mod bench {
+       use chain::Listen;
+       use chain::chainmonitor::ChainMonitor;
+       use chain::channelmonitor::Persist;
+       use chain::keysinterface::{KeysManager, InMemorySigner};
+       use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
+       use ln::features::InitFeatures;
+       use ln::functional_test_utils::*;
+       use ln::msgs::ChannelMessageHandler;
+       use routing::network_graph::NetworkGraph;
+       use routing::router::get_route;
+       use util::test_utils;
+       use util::config::UserConfig;
+       use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+
+       use bitcoin::hashes::Hash;
+       use bitcoin::hashes::sha256::Hash as Sha256;
+       use bitcoin::{Block, BlockHeader, Transaction, TxOut};
+
+       use std::sync::Mutex;
+
+       use test::Bencher;
+
+       struct NodeHolder<'a, P: Persist<InMemorySigner>> {
+               node: &'a ChannelManager<InMemorySigner,
+                       &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
+                               &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
+                               &'a test_utils::TestLogger, &'a P>,
+                       &'a test_utils::TestBroadcaster, &'a KeysManager,
+                       &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>
+       }
+
+       #[cfg(test)]
+       #[bench]
+       fn bench_sends(bench: &mut Bencher) {
+               bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+       }
+
+       pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+               // Do a simple benchmark of sending a payment back and forth between two nodes.
+               // Note that this is unrealistic as each payment send will require at least two fsync
+               // calls per node.
+               let network = bitcoin::Network::Testnet;
+               let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
+
+               let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
+               let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
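+               // 253 sat/kW sits just above the 1 sat/vbyte (250 sat/kW) minimum-relay floor, keeping
+               // generated transactions standard while staying cheap.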
+
+               let mut config: UserConfig = Default::default();
+               config.own_channel_config.minimum_depth = 1;
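+               // With minimum_depth of 1, the single block connected below is enough to consider the
+               // funding confirmed and exchange funding_locked.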
+
+               let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
+               let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
+               let seed_a = [1u8; 32];
+               let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
+               let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
+                       network,
+                       best_block: BestBlock::from_genesis(network),
+               });
+               let node_a_holder = NodeHolder { node: &node_a };
+
+               let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
+               let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
+               let seed_b = [2u8; 32];
+               let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
+               let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
+                       network,
+                       best_block: BestBlock::from_genesis(network),
+               });
+               let node_b_holder = NodeHolder { node: &node_b };
+
+               node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
+               node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
+               node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
+
+               let tx;
+               if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
+                       tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+                               value: 8_000_000, script_pubkey: output_script,
+                       }]};
+                       node_a.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+               } else { panic!(); }
+
+               node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
+               node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
+
+               assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
+
+               let block = Block {
+                       header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+                       txdata: vec![tx],
+               };
+               Listen::block_connected(&node_a, &block, 1);
+               Listen::block_connected(&node_b, &block, 1);
+
+               node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
+               node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+
+               let dummy_graph = NetworkGraph::new(genesis_hash);
+
+               macro_rules! send_payment {
+                       ($node_a: expr, $node_b: expr) => {
+                               let usable_channels = $node_a.list_usable_channels();
+                               let route = get_route(&$node_a.get_our_node_id(), &dummy_graph, &$node_b.get_our_node_id(), None, Some(&usable_channels.iter().collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a).unwrap();
+
+                               let payment_preimage = PaymentPreimage([0; 32]);
+                               let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+
+                               $node_a.send_payment(&route, payment_hash, &None).unwrap();
+                               let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
+                               $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
+                               $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
+                               let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
+                               $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
+                               $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
+                               $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
+
+                               expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
+                               expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
+                               assert!($node_b.claim_funds(payment_preimage, &None, 10_000));
+
+                               match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
+                                       MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+                                               assert_eq!(node_id, $node_a.get_our_node_id());
+                                               $node_a.handle_update_fulfill_htlc(&$node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+                                               $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &updates.commitment_signed);
+                                       },
+                                       _ => panic!("Failed to generate claim event"),
+                               }
+
+                               let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
+                               $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
+                               $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
+                               $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
+
+                               expect_payment_sent!(NodeHolder { node: &$node_a }, payment_preimage);
+                       }
+               }
+
+               bench.iter(|| {
+                       send_payment!(node_a, node_b);
+                       send_payment!(node_b, node_a);
+               });
+       }
+}