git.bitcoin.ninja Git - rust-lightning/commitdiff
Merge pull request #856 from TheBlueMatt/2021-03-check-tx
author Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Sat, 10 Apr 2021 20:27:24 +0000 (20:27 +0000)
committer GitHub <noreply@github.com>
Sat, 10 Apr 2021 20:27:24 +0000 (20:27 +0000)
Take the full funding transaction from the user on generation
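
A minimal sketch of the updated flow, assuming a running ChannelManager behind
channel_manager and wallet-side transaction construction that is outside this diff: on
Event::FundingGenerationReady the caller now hands the full funding transaction back to
LDK (instead of only the funding OutPoint), and LDK broadcasts it itself via the
BroadcasterInterface once the counterparty's funding_signed arrives, so the
FundingBroadcastSafe event goes away.

    // Hedged example; `event` and `channel_manager` are assumed to exist in the caller.
    // Assumes: use bitcoin::blockdata::transaction::{Transaction, TxOut};
    //          use lightning::util::events::Event;
    match event {
        Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, output_script, .. } => {
            // A real node would have its wallet select inputs and sign; the tests in this
            // diff just build a bare transaction with the single funding output.
            let funding_tx = Transaction {
                version: 1, lock_time: 0, input: Vec::new(),
                output: vec![TxOut { value: channel_value_satoshis, script_pubkey: output_script }],
            };
            // New API: pass the whole transaction. Do not broadcast it yourself; LDK
            // hands it to the BroadcasterInterface after a successful funding_signed.
            channel_manager.funding_transaction_generated(&temporary_channel_id, funding_tx).unwrap();
        },
        _ => {},
    }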

background-processor/src/lib.rs
fuzz/src/chanmon_consistency.rs
fuzz/src/full_stack.rs
lightning/src/ln/chanmon_update_fail_tests.rs
lightning/src/ln/channel.rs
lightning/src/ln/channelmanager.rs
lightning/src/ln/functional_test_utils.rs
lightning/src/ln/functional_tests.rs

diff --combined background-processor/src/lib.rs
index 6d9db076fa44a6cd0fd2c749f76aa6c4448e18b7,49be55cf9b60c59e3f646501c4a0741ccb1009df..248870073658ecb2b9119bcda45e2872a427fe4a
@@@ -12,8 -12,6 +12,8 @@@ use lightning::chain
  use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
  use lightning::chain::keysinterface::{Sign, KeysInterface};
  use lightning::ln::channelmanager::ChannelManager;
 +use lightning::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 +use lightning::ln::peer_handler::{PeerManager, SocketDescriptor};
  use lightning::util::logger::Logger;
  use std::sync::Arc;
  use std::sync::atomic::{AtomicBool, Ordering};
@@@ -65,50 -63,40 +65,50 @@@ impl BackgroundProcessor 
        /// [`ChannelManager`]: lightning::ln::channelmanager::ChannelManager
        /// [`ChannelManager::write`]: lightning::ln::channelmanager::ChannelManager#impl-Writeable
        /// [`FilesystemPersister::persist_manager`]: lightning_persister::FilesystemPersister::persist_manager
 -      pub fn start<PM, Signer, M, T, K, F, L>(persist_manager: PM, manager: Arc<ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>>, logger: Arc<L>) -> Self
 -      where Signer: 'static + Sign,
 -            M: 'static + chain::Watch<Signer>,
 -            T: 'static + BroadcasterInterface,
 -            K: 'static + KeysInterface<Signer=Signer>,
 -            F: 'static + FeeEstimator,
 -            L: 'static + Logger,
 -            PM: 'static + Send + Fn(&ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>) -> Result<(), std::io::Error>,
 +      pub fn start<PM, Signer, M, T, K, F, L, Descriptor: 'static + SocketDescriptor + Send, CM, RM>(
 +              persist_channel_manager: PM,
 +              channel_manager: Arc<ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>>,
 +              peer_manager: Arc<PeerManager<Descriptor, Arc<CM>, Arc<RM>, Arc<L>>>, logger: Arc<L>,
 +      ) -> Self
 +      where
 +              Signer: 'static + Sign,
 +              M: 'static + chain::Watch<Signer>,
 +              T: 'static + BroadcasterInterface,
 +              K: 'static + KeysInterface<Signer = Signer>,
 +              F: 'static + FeeEstimator,
 +              L: 'static + Logger,
 +              CM: 'static + ChannelMessageHandler,
 +              RM: 'static + RoutingMessageHandler,
 +              PM: 'static
 +                      + Send
 +                      + Fn(
 +                              &ChannelManager<Signer, Arc<M>, Arc<T>, Arc<K>, Arc<F>, Arc<L>>,
 +                      ) -> Result<(), std::io::Error>,
        {
                let stop_thread = Arc::new(AtomicBool::new(false));
                let stop_thread_clone = stop_thread.clone();
                let handle = thread::spawn(move || -> Result<(), std::io::Error> {
                        let mut current_time = Instant::now();
                        loop {
 -                              let updates_available = manager.await_persistable_update_timeout(Duration::from_millis(100));
 +                              peer_manager.process_events();
 +                              let updates_available =
 +                                      channel_manager.await_persistable_update_timeout(Duration::from_millis(100));
                                if updates_available {
 -                                      persist_manager(&*manager)?;
 +                                      persist_channel_manager(&*channel_manager)?;
                                }
                                // Exit the loop if the background processor was requested to stop.
                                if stop_thread.load(Ordering::Acquire) == true {
                                        log_trace!(logger, "Terminating background processor.");
 -                                      return Ok(())
 +                                      return Ok(());
                                }
                                if current_time.elapsed().as_secs() > CHAN_FRESHNESS_TIMER {
                                        log_trace!(logger, "Calling manager's timer_chan_freshness_every_min");
 -                                      manager.timer_chan_freshness_every_min();
 +                                      channel_manager.timer_chan_freshness_every_min();
                                        current_time = Instant::now();
                                }
                        }
                });
 -              Self {
 -                      stop_thread: stop_thread_clone,
 -                      thread_handle: handle,
 -              }
 +              Self { stop_thread: stop_thread_clone, thread_handle: handle }
        }
  
        /// Stop `BackgroundProcessor`'s thread.
@@@ -132,7 -120,6 +132,7 @@@ mod tests 
        use lightning::ln::channelmanager::{ChainParameters, ChannelManager, SimpleArcChannelManager};
        use lightning::ln::features::InitFeatures;
        use lightning::ln::msgs::ChannelMessageHandler;
 +      use lightning::ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor};
        use lightning::util::config::UserConfig;
        use lightning::util::events::{Event, EventsProvider, MessageSendEventsProvider, MessageSendEvent};
        use lightning::util::logger::Logger;
        use std::time::Duration;
        use super::BackgroundProcessor;
  
 +      #[derive(Clone, Eq, Hash, PartialEq)]
 +      struct TestDescriptor{}
 +      impl SocketDescriptor for TestDescriptor {
 +              fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize {
 +                      0
 +              }
 +
 +              fn disconnect_socket(&mut self) {}
 +      }
 +
        type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
  
        struct Node {
                node: Arc<SimpleArcChannelManager<ChainMonitor, test_utils::TestBroadcaster, test_utils::TestFeeEstimator, test_utils::TestLogger>>,
 +              peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, Arc<test_utils::TestLogger>>>,
                persister: Arc<FilesystemPersister>,
                logger: Arc<test_utils::TestLogger>,
        }
                                latest_height: 0,
                        };
                        let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster, logger.clone(), keys_manager.clone(), UserConfig::default(), params));
 -                      let node = Node { node: manager, persister, logger };
 +                      let msg_handler = MessageHandler { chan_handler: Arc::new(test_utils::TestChannelMessageHandler::new()), route_handler: Arc::new(test_utils::TestRoutingMessageHandler::new() )};
 +                      let peer_manager = Arc::new(PeerManager::new(msg_handler, keys_manager.get_node_secret(), &seed, logger.clone()));
 +                      let node = Node { node: manager, peer_manager, persister, logger };
                        nodes.push(node);
                }
                nodes
                        $node_a.node.handle_accept_channel(&$node_b.node.get_our_node_id(), InitFeatures::known(), &get_event_msg!($node_b, MessageSendEvent::SendAcceptChannel, $node_a.node.get_our_node_id()));
                        let events = $node_a.node.get_and_clear_pending_events();
                        assert_eq!(events.len(), 1);
-                       let (temporary_channel_id, tx, funding_output) = match events[0] {
+                       let (temporary_channel_id, tx) = match events[0] {
                                Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
                                        assert_eq!(*channel_value_satoshis, $channel_value);
                                        assert_eq!(user_channel_id, 42);
                                        let tx = Transaction { version: 1 as i32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
                                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                                        }]};
-                                       let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
-                                       (*temporary_channel_id, tx, funding_outpoint)
+                                       (*temporary_channel_id, tx)
                                },
                                _ => panic!("Unexpected event"),
                        };
  
-                       $node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
+                       $node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
                        $node_b.node.handle_funding_created(&$node_a.node.get_our_node_id(), &get_event_msg!($node_a, MessageSendEvent::SendFundingCreated, $node_b.node.get_our_node_id()));
                        $node_a.node.handle_funding_signed(&$node_b.node.get_our_node_id(), &get_event_msg!($node_b, MessageSendEvent::SendFundingSigned, $node_a.node.get_our_node_id()));
                        tx
                // Initiate the background processors to watch each node.
                let data_dir = nodes[0].persister.get_data_dir();
                let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
 -              let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].logger.clone());
 +              let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
  
                // Go through the channel creation process until each node should have something persisted.
                let tx = open_channel!(nodes[0], nodes[1], 100000);
                let nodes = create_nodes(1, "test_chan_freshness_called".to_string());
                let data_dir = nodes[0].persister.get_data_dir();
                let callback = move |node: &ChannelManager<InMemorySigner, Arc<ChainMonitor>, Arc<test_utils::TestBroadcaster>, Arc<KeysManager>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>>| FilesystemPersister::persist_manager(data_dir.clone(), node);
 -              let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].logger.clone());
 +              let bg_processor = BackgroundProcessor::start(callback, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
                loop {
                        let log_entries = nodes[0].logger.lines.lock().unwrap();
                        let desired_log = "Calling manager's timer_chan_freshness_every_min".to_string();
                }
  
                let nodes = create_nodes(2, "test_persist_error".to_string());
 -              let bg_processor = BackgroundProcessor::start(persist_manager, nodes[0].node.clone(), nodes[0].logger.clone());
 +              let bg_processor = BackgroundProcessor::start(persist_manager, nodes[0].node.clone(), nodes[0].peer_manager.clone(), nodes[0].logger.clone());
                open_channel!(nodes[0], nodes[1], 100000);
  
                let _ = bg_processor.thread_handle.join().unwrap().expect_err("Errored persisting manager: test");
diff --combined fuzz/src/chanmon_consistency.rs
index 87b95cf2a538a972e882a2d2ad0ca6fb2bd99c47,3feeaf46d5e1636d01736a904c9a52aeb8f2b5fa..a6a6a853ed17d877cf51084c035a5b6aaaa55d4a
@@@ -234,7 -234,7 +234,7 @@@ fn check_api_err(api_err: APIError) 
                                _ if err.starts_with("Cannot send value that would put our balance under counterparty-announced channel reserve value") => {},
                                _ if err.starts_with("Cannot send value that would overdraw remaining funds.") => {},
                                _ if err.starts_with("Cannot send value that would not leave enough to pay for fees.") => {},
 -                              _ => panic!(err),
 +                              _ => panic!("{}", err),
                        }
                },
                APIError::MonitorUpdateFailed => {
@@@ -397,7 -397,7 +397,7 @@@ pub fn do_test<Out: test_logger::Output
                                                value: *channel_value_satoshis, script_pubkey: output_script.clone(),
                                        }]};
                                        funding_output = OutPoint { txid: tx.txid(), index: 0 };
-                                       $source.funding_transaction_generated(&temporary_channel_id, funding_output);
+                                       $source.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
                                        channel_txn.push(tx);
                                } else { panic!("Wrong event type"); }
                        }
                        };
                        $source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed);
  
-                       {
-                               let events = $source.get_and_clear_pending_events();
-                               assert_eq!(events.len(), 1);
-                               if let events::Event::FundingBroadcastSafe { .. } = events[0] {
-                               } else { panic!("Wrong event type"); }
-                       }
                        funding_output
                } }
        }
                        let chain_hash = genesis_block(Network::Bitcoin).block_hash();
                        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: chain_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
                        let txdata: Vec<_> = channel_txn.iter().enumerate().map(|(i, tx)| (i + 1, tx)).collect();
 -                      $node.block_connected(&header, &txdata, 1);
 -                      for i in 2..100 {
 +                      $node.transactions_confirmed(&header, 1, &txdata);
 +                      for _ in 2..100 {
                                header = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 -                              $node.block_connected(&header, &[], i);
                        }
 +                      $node.update_best_block(&header, 99);
                } }
        }
  
diff --combined fuzz/src/full_stack.rs
index 132d4f8ebd20123ccde8c120a63ab0e1b1a264cc,3acf7ba53ca7c642c6d3bcf4685480eab1131741..9e4f18b5df8611d442c6ab13c2afc0341aed8e31
@@@ -27,7 -27,6 +27,7 @@@ use bitcoin::hashes::sha256::Hash as Sh
  use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
  
  use lightning::chain;
 +use lightning::chain::Listen;
  use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
  use lightning::chain::chainmonitor;
  use lightning::chain::transaction::OutPoint;
@@@ -52,7 -51,7 +52,7 @@@ use bitcoin::secp256k1::Secp256k1
  use std::cell::RefCell;
  use std::collections::{HashMap, hash_map};
  use std::cmp;
- use std::sync::Arc;
+ use std::sync::{Arc, Mutex};
  use std::sync::atomic::{AtomicU64,AtomicUsize,Ordering};
  
  #[inline]
@@@ -117,9 -116,13 +117,13 @@@ impl FeeEstimator for FuzzEstimator 
        }
  }
  
- struct TestBroadcaster {}
+ struct TestBroadcaster {
+       txn_broadcasted: Mutex<Vec<Transaction>>,
+ }
  impl BroadcasterInterface for TestBroadcaster {
-       fn broadcast_transaction(&self, _tx: &Transaction) {}
+       fn broadcast_transaction(&self, tx: &Transaction) {
+               self.txn_broadcasted.lock().unwrap().push(tx.clone());
+       }
  }
  
  #[derive(Clone)]
@@@ -203,8 -206,7 +207,8 @@@ impl<'a> MoneyLossDetector<'a> 
                self.blocks_connected += 1;
                let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height].0, merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 };
                self.height += 1;
 -              self.manager.block_connected(&header, &txdata, self.height as u32);
 +              self.manager.transactions_confirmed(&header, self.height as u32, &txdata);
 +              self.manager.update_best_block(&header, self.height as u32);
                (*self.monitor).block_connected(&header, &txdata, self.height as u32);
                if self.header_hashes.len() > self.height {
                        self.header_hashes[self.height] = (header.block_hash(), self.blocks_connected);
        fn disconnect_block(&mut self) {
                if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
                        let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height - 1].0, merkle_root: Default::default(), time: self.header_hashes[self.height].1, bits: 42, nonce: 42 };
 -                      self.manager.block_disconnected(&header);
 +                      self.manager.block_disconnected(&header, self.height as u32);
                        self.monitor.block_disconnected(&header, self.height as u32);
                        self.height -= 1;
                        let removal_height = self.height;
@@@ -342,7 -344,7 +346,7 @@@ pub fn do_test(data: &[u8], logger: &Ar
                Err(_) => return,
        };
  
-       let broadcast = Arc::new(TestBroadcaster{});
+       let broadcast = Arc::new(TestBroadcaster{ txn_broadcasted: Mutex::new(Vec::new()) });
        let monitor = Arc::new(chainmonitor::ChainMonitor::new(None, broadcast.clone(), Arc::clone(&logger), fee_est.clone(), Arc::new(TestPersister{})));
  
        let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) });
        let mut payments_sent = 0;
        let mut pending_funding_generation: Vec<([u8; 32], u64, Script)> = Vec::new();
        let mut pending_funding_signatures = HashMap::new();
-       let mut pending_funding_relay = Vec::new();
  
        loop {
                match get_slice!(1)[0] {
                                                        continue 'outer_loop;
                                                }
                                        };
-                                       channelmanager.funding_transaction_generated(&funding_generation.0, funding_output.clone());
+                                       channelmanager.funding_transaction_generated(&funding_generation.0, tx.clone()).unwrap();
                                        pending_funding_signatures.insert(funding_output, tx);
                                }
                        },
                        11 => {
-                               if !pending_funding_relay.is_empty() {
-                                       loss_detector.connect_block(&pending_funding_relay[..]);
+                               let mut txn = broadcast.txn_broadcasted.lock().unwrap();
+                               if !txn.is_empty() {
+                                       loss_detector.connect_block(&txn[..]);
                                        for _ in 2..100 {
                                                loss_detector.connect_block(&[]);
                                        }
                                }
-                               for tx in pending_funding_relay.drain(..) {
+                               for tx in txn.drain(..) {
                                        loss_detector.funding_txn.push(tx);
                                }
                        },
                                Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, output_script, .. } => {
                                        pending_funding_generation.push((temporary_channel_id, channel_value_satoshis, output_script));
                                },
-                               Event::FundingBroadcastSafe { funding_txo, .. } => {
-                                       pending_funding_relay.push(pending_funding_signatures.remove(&funding_txo).unwrap());
-                               },
                                Event::PaymentReceived { payment_hash, payment_secret, amt } => {
                                        //TODO: enhance by fetching random amounts from fuzz input?
                                        payments_received.push((payment_hash, payment_secret, amt));
diff --combined lightning/src/ln/chanmon_update_fail_tests.rs
index 13eb4ed8b465ae0de4473d82877ed8f13a0b9a63,dc37e3f6c7421295691f34f1b821d1fe9cb2f1ea..a7cc5377a2532a14644619e623a64ed6a0278ad1
@@@ -241,7 -241,7 +241,7 @@@ fn do_test_simple_monitor_temporary_upd
        // ...and make sure we can force-close a frozen channel
        nodes[0].node.force_close_channel(&channel_id).unwrap();
        check_added_monitors!(nodes[0], 1);
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
  
        // TODO: Once we hit the chain with the failure transaction we should check that we get a
        // PaymentFailed event
@@@ -1825,7 -1825,7 +1825,7 @@@ fn do_during_funding_monitor_fail(confi
  
        let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
  
-       nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);
  
        *nodes[1].chain_monitor.update_ret.lock().unwrap() = Some(Err(ChannelMonitorUpdateErr::TemporaryFailure));
        check_added_monitors!(nodes[0], 0);
  
        let events = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events.len(), 1);
-       match events[0] {
-               Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
-                       assert_eq!(user_channel_id, 43);
-                       assert_eq!(*funding_txo, funding_output);
-               },
-               _ => panic!("Unexpected event"),
-       };
+       assert_eq!(events.len(), 0);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].txid(), funding_output.txid);
  
        if confirm_a_first {
                confirm_transaction(&nodes[0], &funding_tx);
diff --combined lightning/src/ln/channel.rs
index a54b9cb19386231a36bd460f66092dea51d74276,174aecd3ba036f46dc9acdc95d2b287ef536d812..c13ac9b61ffc94b000f4204ea23baf8a14a0e6aa
@@@ -7,6 -7,7 +7,6 @@@
  // You may not use this file except in accordance with one or both of these
  // licenses.
  
 -use bitcoin::blockdata::block::BlockHeader;
  use bitcoin::blockdata::script::{Script,Builder};
  use bitcoin::blockdata::transaction::{TxIn, TxOut, Transaction, SigHashType};
  use bitcoin::blockdata::opcodes;
@@@ -375,10 -376,13 +375,10 @@@ pub(super) struct Channel<Signer: Sign
  
        last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig)
  
 -      /// The hash of the block in which the funding transaction reached our CONF_TARGET. We use this
 -      /// to detect unconfirmation after a serialize-unserialize roundtrip where we may not see a full
 -      /// series of block_connected/block_disconnected calls. Obviously this is not a guarantee as we
 -      /// could miss the funding_tx_confirmed_in block as well, but it serves as a useful fallback.
 +      /// The hash of the block in which the funding transaction was included.
        funding_tx_confirmed_in: Option<BlockHash>,
 +      funding_tx_confirmation_height: u64,
        short_channel_id: Option<u64>,
 -      funding_tx_confirmations: u64,
  
        counterparty_dust_limit_satoshis: u64,
        #[cfg(test)]
        counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
  
        pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
+       funding_transaction: Option<Transaction>,
  
        counterparty_cur_commitment_point: Option<PublicKey>,
        counterparty_prev_commitment_point: Option<PublicKey>,
        counterparty_node_id: PublicKey,
  
@@@ -437,6 -441,10 +437,6 @@@ struct CommitmentTxInfoCached 
  }
  
  pub const OUR_MAX_HTLCS: u16 = 50; //TODO
 -/// Confirmation count threshold at which we close a channel. Ideally we'd keep the channel around
 -/// on ice until the funding transaction gets more confirmations, but the LN protocol doesn't
 -/// really allow for this, so instead we're stuck closing it out at that point.
 -const UNCONF_THRESHOLD: u32 = 6;
  const SPENDING_INPUT_FOR_A_OUTPUT_WEIGHT: u64 = 79; // prevout: 36, nSequence: 4, script len: 1, witness lengths: (3+1)/4, sig: 73/4, if-selector: 1, redeemScript: (6 ops + 2*33 pubkeys + 1*2 delay)/4
  const B_OUTPUT_PLUS_SPENDING_INPUT_WEIGHT: u64 = 104; // prevout: 40, nSequence: 4, script len: 1, witness lengths: 3/4, sig: 73/4, pubkey: 33/4, output: 31 (TODO: Wrong? Useless?)
  
@@@ -573,8 -581,8 +573,8 @@@ impl<Signer: Sign> Channel<Signer> 
                        last_sent_closing_fee: None,
  
                        funding_tx_confirmed_in: None,
 +                      funding_tx_confirmation_height: 0,
                        short_channel_id: None,
 -                      funding_tx_confirmations: 0,
  
                        feerate_per_kw: feerate,
                        counterparty_dust_limit_satoshis: 0,
                                counterparty_parameters: None,
                                funding_outpoint: None
                        },
-                       counterparty_cur_commitment_point: None,
+                       funding_transaction: None,
  
+                       counterparty_cur_commitment_point: None,
                        counterparty_prev_commitment_point: None,
                        counterparty_node_id,
  
                        last_sent_closing_fee: None,
  
                        funding_tx_confirmed_in: None,
 +                      funding_tx_confirmation_height: 0,
                        short_channel_id: None,
 -                      funding_tx_confirmations: 0,
  
                        feerate_per_kw: msg.feerate_per_kw,
                        channel_value_satoshis: msg.funding_satoshis,
                                }),
                                funding_outpoint: None
                        },
-                       counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
+                       funding_transaction: None,
  
+                       counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
                        counterparty_prev_commitment_point: None,
                        counterparty_node_id,
  
  
        /// Handles a funding_signed message from the remote end.
        /// If this call is successful, broadcast the funding transaction (and not before!)
-       pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, last_block_hash: BlockHash, logger: &L) -> Result<ChannelMonitor<Signer>, ChannelError> where L::Target: Logger {
+       pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, last_block_hash: BlockHash, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction), ChannelError> where L::Target: Logger {
                if !self.is_outbound() {
                        return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
                }
                self.cur_holder_commitment_transaction_number -= 1;
                self.cur_counterparty_commitment_transaction_number -= 1;
  
-               Ok(channel_monitor)
+               Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap()))
        }
  
        pub fn funding_locked(&mut self, msg: &msgs::FundingLocked) -> Result<(), ChannelError> {
        /// Indicates that the latest ChannelMonitor update has been committed by the client
        /// successfully and we should restore normal operation. Returns messages which should be sent
        /// to the remote side.
-       pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, bool, Option<msgs::FundingLocked>) where L::Target: Logger {
+       pub fn monitor_updating_restored<L: Deref>(&mut self, logger: &L) -> (Option<msgs::RevokeAndACK>, Option<msgs::CommitmentUpdate>, RAACommitmentOrder, Vec<(PendingHTLCInfo, u64)>, Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, Option<Transaction>, Option<msgs::FundingLocked>) where L::Target: Logger {
                assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
                self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
  
-               let needs_broadcast_safe = self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.is_outbound();
+               let funding_broadcastable = if self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.is_outbound() {
+                       self.funding_transaction.take()
+               } else { None };
  
-               // Because we will never generate a FundingBroadcastSafe event when we're in
-               // MonitorUpdateFailed, if we assume the user only broadcast the funding transaction when
-               // they received the FundingBroadcastSafe event, we can only ever hit
-               // monitor_pending_funding_locked when we're an inbound channel which failed to persist the
-               // monitor on funding_created, and we even got the funding transaction confirmed before the
-               // monitor was persisted.
+               // We will never broadcast the funding transaction when we're in MonitorUpdateFailed (and
+               // we assume the user never directly broadcasts the funding transaction and waits for us to
+               // do it). Thus, we can only ever hit monitor_pending_funding_locked when we're an inbound
+               // channel which failed to persist the monitor on funding_created, and we got the funding
+               // transaction confirmed before the monitor was persisted.
                let funding_locked = if self.monitor_pending_funding_locked {
-                       assert!(!self.is_outbound(), "Funding transaction broadcast without FundingBroadcastSafe!");
+                       assert!(!self.is_outbound(), "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
                        self.monitor_pending_funding_locked = false;
                        let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
                        Some(msgs::FundingLocked {
                if self.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
                        self.monitor_pending_revoke_and_ack = false;
                        self.monitor_pending_commitment_signed = false;
-                       return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, needs_broadcast_safe, funding_locked);
+                       return (None, None, RAACommitmentOrder::RevokeAndACKFirst, forwards, failures, funding_broadcastable, funding_locked);
                }
  
                let raa = if self.monitor_pending_revoke_and_ack {
                self.monitor_pending_commitment_signed = false;
                let order = self.resend_order.clone();
                log_trace!(logger, "Restored monitor updating resulting in {}{} commitment update and {} RAA, with {} first",
-                       if needs_broadcast_safe { "a funding broadcast safe, " } else { "" },
+                       if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
                        if commitment_update.is_some() { "a" } else { "no" },
                        if raa.is_some() { "an" } else { "no" },
                        match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
-               (raa, commitment_update, order, forwards, failures, needs_broadcast_safe, funding_locked)
+               (raa, commitment_update, order, forwards, failures, funding_broadcastable, funding_locked)
        }
  
        pub fn update_fee<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
                self.network_sync == UpdateStatus::DisabledMarked
        }
  
 -      /// When we receive a new block, we (a) check whether the block contains the funding
 -      /// transaction (which would start us counting blocks until we send the funding_signed), and
 -      /// (b) check the height of the block against outbound holding cell HTLCs in case we need to
 -      /// give up on them prematurely and time them out. Everything else (e.g. commitment
 -      /// transaction broadcasts, channel closure detection, HTLC transaction broadcasting, etc) is
 +      fn check_get_funding_locked(&mut self, height: u32) -> Option<msgs::FundingLocked> {
 +              if self.funding_tx_confirmation_height == 0 {
 +                      return None;
 +              }
 +
 +              let funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
 +              if funding_tx_confirmations <= 0 {
 +                      self.funding_tx_confirmation_height = 0;
 +              }
 +
 +              if funding_tx_confirmations < self.minimum_depth as i64 {
 +                      return None;
 +              }
 +
 +              let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
 +              let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
 +                      self.channel_state |= ChannelState::OurFundingLocked as u32;
 +                      true
 +              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
 +                      self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
 +                      self.update_time_counter += 1;
 +                      true
 +              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
 +                      // We got a reorg but not enough to trigger a force close, just ignore.
 +                      false
 +              } else if self.channel_state < ChannelState::ChannelFunded as u32 {
 +                      panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
 +              } else {
 +                      // We got a reorg but not enough to trigger a force close, just ignore.
 +                      false
 +              };
 +
 +              if need_commitment_update {
 +                      if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
 +                              let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
 +                              return Some(msgs::FundingLocked {
 +                                      channel_id: self.channel_id,
 +                                      next_per_commitment_point,
 +                              });
 +                      } else {
 +                              self.monitor_pending_funding_locked = true;
 +                      }
 +              }
 +              None
 +      }
 +
 +      /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
 +      /// In the first case, we store the confirmation height and calculate the short channel id.
 +      /// In the second, we simply return an Err indicating we need to be force-closed now.
 +      pub fn transactions_confirmed<L: Deref>(&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData, logger: &L)
 +                      -> Result<Option<msgs::FundingLocked>, msgs::ErrorMessage> where L::Target: Logger {
 +              let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
 +              for &(index_in_block, tx) in txdata.iter() {
 +                      if let Some(funding_txo) = self.get_funding_txo() {
 +                              // If we haven't yet sent a funding_locked, but are in FundingSent (ignoring
 +                              // whether they've sent a funding_locked or not), check if we should send one.
 +                              if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
 +                                      if tx.txid() == funding_txo.txid {
 +                                              let txo_idx = funding_txo.index as usize;
 +                                              if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
 +                                                              tx.output[txo_idx].value != self.channel_value_satoshis {
 +                                                      if self.is_outbound() {
 +                                                              // If we generated the funding transaction and it doesn't match what it
 +                                                              // should, the client is really broken and we should just panic and
 +                                                              // tell them off. That said, because hash collisions happen with high
 +                                                              // probability in fuzztarget mode, if we're fuzzing we just close the
 +                                                              // channel and move on.
 +                                                              #[cfg(not(feature = "fuzztarget"))]
 +                                                              panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
 +                                                      }
 +                                                      self.channel_state = ChannelState::ShutdownComplete as u32;
 +                                                      self.update_time_counter += 1;
 +                                                      return Err(msgs::ErrorMessage {
 +                                                              channel_id: self.channel_id(),
 +                                                              data: "funding tx had wrong script/value or output index".to_owned()
 +                                                      });
 +                                              } else {
 +                                                      if self.is_outbound() {
 +                                                              for input in tx.input.iter() {
 +                                                                      if input.witness.is_empty() {
 +                                                                              // We generated a malleable funding transaction, implying we've
 +                                                                              // just exposed ourselves to funds loss to our counterparty.
 +                                                                              #[cfg(not(feature = "fuzztarget"))]
 +                                                                              panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
 +                                                                      }
 +                                                              }
 +                                                      }
 +                                                      self.funding_tx_confirmation_height = height as u64;
 +                                                      self.funding_tx_confirmed_in = Some(*block_hash);
 +                                                      self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
 +                                                              Ok(scid) => Some(scid),
 +                                                              Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
 +                                                      }
 +                                              }
 +                                      }
 +                                      // If we allow 1-conf funding, we may need to check for funding_locked here and
 +                                      // send it immediately instead of waiting for an update_best_block call (which
 +                                      // may have already happened for this block).
 +                                      if let Some(funding_locked) = self.check_get_funding_locked(height) {
 +                                              return Ok(Some(funding_locked));
 +                                      }
 +                              }
 +                              for inp in tx.input.iter() {
 +                                      if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
 +                                              log_trace!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(self.channel_id()));
 +                                              return Err(msgs::ErrorMessage {
 +                                                      channel_id: self.channel_id(),
 +                                                      data: "Commitment or closing transaction was confirmed on chain.".to_owned()
 +                                              });
 +                                      }
 +                              }
 +                      }
 +              }
 +              Ok(None)
 +      }
 +
 +      /// When a new block is connected, we check the height of the block against outbound holding
 +      /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
 +      /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
        /// handled by the ChannelMonitor.
        ///
        /// If we return Err, the channel may have been closed, at which point the standard
        /// requirements apply - no calls may be made except those explicitly stated to be allowed
        /// post-shutdown.
 -      /// Only returns an ErrorAction of DisconnectPeer, if Err.
        ///
        /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
        /// back.
 -      pub fn block_connected(&mut self, header: &BlockHeader, txdata: &TransactionData, height: u32) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> {
 +      pub fn update_best_block(&mut self, height: u32, highest_header_time: u32) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage> {
                let mut timed_out_htlcs = Vec::new();
 +              let unforwarded_htlc_cltv_limit = height + HTLC_FAIL_BACK_BUFFER;
                self.holding_cell_htlc_updates.retain(|htlc_update| {
                        match htlc_update {
                                &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
 -                                      if *cltv_expiry <= height + HTLC_FAIL_BACK_BUFFER {
 +                                      if *cltv_expiry <= unforwarded_htlc_cltv_limit {
                                                timed_out_htlcs.push((source.clone(), payment_hash.clone()));
                                                false
                                        } else { true }
                        }
                });
  
 -              if self.funding_tx_confirmations > 0 {
 -                      self.funding_tx_confirmations += 1;
 +              self.update_time_counter = cmp::max(self.update_time_counter, highest_header_time);
 +
 +              if let Some(funding_locked) = self.check_get_funding_locked(height) {
 +                      return Ok((Some(funding_locked), timed_out_htlcs));
                }
  
                let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
 -              if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
 -                      for &(index_in_block, tx) in txdata.iter() {
 -                              let funding_txo = self.get_funding_txo().unwrap();
 -                              if tx.txid() == funding_txo.txid {
 -                                      let txo_idx = funding_txo.index as usize;
 -                                      if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
 -                                                      tx.output[txo_idx].value != self.channel_value_satoshis {
 -                                              if self.is_outbound() {
 -                                                      // If we generated the funding transaction and it doesn't match what it
 -                                                      // should, the client is really broken and we should just panic and
 -                                                      // tell them off. That said, because hash collisions happen with high
 -                                                      // probability in fuzztarget mode, if we're fuzzing we just close the
 -                                                      // channel and move on.
 -                                                      #[cfg(not(feature = "fuzztarget"))]
 -                                                      panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
 -                                              }
 -                                              self.channel_state = ChannelState::ShutdownComplete as u32;
 -                                              self.update_time_counter += 1;
 -                                              return Err(msgs::ErrorMessage {
 -                                                      channel_id: self.channel_id(),
 -                                                      data: "funding tx had wrong script/value".to_owned()
 -                                              });
 -                                      } else {
 -                                              if self.is_outbound() {
 -                                                      for input in tx.input.iter() {
 -                                                              if input.witness.is_empty() {
 -                                                                      // We generated a malleable funding transaction, implying we've
 -                                                                      // just exposed ourselves to funds loss to our counterparty.
 -                                                                      #[cfg(not(feature = "fuzztarget"))]
 -                                                                      panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
 -                                                              }
 -                                                      }
 -                                              }
 -                                              self.funding_tx_confirmations = 1;
 -                                              self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
 -                                                      Ok(scid) => Some(scid),
 -                                                      Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
 -                                              }
 -                                      }
 -                              }
 +              if non_shutdown_state >= ChannelState::ChannelFunded as u32 ||
 +                 (non_shutdown_state & ChannelState::OurFundingLocked as u32) == ChannelState::OurFundingLocked as u32 {
 +                      let mut funding_tx_confirmations = height as i64 - self.funding_tx_confirmation_height as i64 + 1;
 +                      if self.funding_tx_confirmation_height == 0 {
 +                              // Note that check_get_funding_locked may reset funding_tx_confirmation_height to
 +                              // zero if it has been reorged out, however in either case, our state flags
 +                              // indicate we've already sent a funding_locked
 +                              funding_tx_confirmations = 0;
                        }
 -              }
  
 -              self.update_time_counter = cmp::max(self.update_time_counter, header.time);
 -              if self.funding_tx_confirmations > 0 {
 -                      if self.funding_tx_confirmations == self.minimum_depth as u64 {
 -                              let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
 -                                      self.channel_state |= ChannelState::OurFundingLocked as u32;
 -                                      true
 -                              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
 -                                      self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
 -                                      self.update_time_counter += 1;
 -                                      true
 -                              } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
 -                                      // We got a reorg but not enough to trigger a force close, just update
 -                                      // funding_tx_confirmed_in and return.
 -                                      false
 -                              } else if self.channel_state < ChannelState::ChannelFunded as u32 {
 -                                      panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
 -                              } else {
 -                                      // We got a reorg but not enough to trigger a force close, just update
 -                                      // funding_tx_confirmed_in and return.
 -                                      false
 -                              };
 -                              self.funding_tx_confirmed_in = Some(header.block_hash());
 -
 -                              //TODO: Note that this must be a duplicate of the previous commitment point they sent us,
 -                              //as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
 -                              //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
 -                              //a protocol oversight, but I assume I'm just missing something.
 -                              if need_commitment_update {
 -                                      if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
 -                                              let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
 -                                              return Ok((Some(msgs::FundingLocked {
 -                                                      channel_id: self.channel_id,
 -                                                      next_per_commitment_point,
 -                                              }), timed_out_htlcs));
 -                                      } else {
 -                                              self.monitor_pending_funding_locked = true;
 -                                              return Ok((None, timed_out_htlcs));
 -                                      }
 -                              }
 +                      // If we've sent funding_locked (or have both sent and received funding_locked), and
 +                      // the funding transaction's confirmation count has dipped below minimum_depth / 2,
 +                      // close the channel and hope we can get the latest state on chain (because presumably
 +                      // the funding transaction is at least still in the mempool of most nodes).
 +                      if funding_tx_confirmations < self.minimum_depth as i64 / 2 {
 +                              return Err(msgs::ErrorMessage {
 +                                      channel_id: self.channel_id(),
 +                                      data: format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.", self.minimum_depth, funding_tx_confirmations),
 +                              });
                        }
                }
 -              Ok((None, timed_out_htlcs))
 -      }
  
 -      /// Called by channelmanager based on chain blocks being disconnected.
 -      /// Returns true if we need to close the channel now due to funding transaction
 -      /// unconfirmation/reorg.
 -      pub fn block_disconnected(&mut self, header: &BlockHeader) -> bool {
 -              if self.funding_tx_confirmations > 0 {
 -                      self.funding_tx_confirmations -= 1;
 -                      if self.funding_tx_confirmations == UNCONF_THRESHOLD as u64 {
 -                              return true;
 -                      }
 -              }
 -              if Some(header.block_hash()) == self.funding_tx_confirmed_in {
 -                      self.funding_tx_confirmations = self.minimum_depth as u64 - 1;
 -              }
 -              false
 +              Ok((None, timed_out_htlcs))
        }
  
        // Methods to get unprompted messages to send to the remote end (or where we already returned
        /// Note that channel_id changes during this call!
        /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
        /// If an Err is returned, it is a ChannelError::Close.
-       pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
+       pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
                if !self.is_outbound() {
                        panic!("Tried to create outbound funding_created message on an inbound channel!");
                }
  
                self.channel_state = ChannelState::FundingCreated as u32;
                self.channel_id = funding_txo.to_channel_id();
+               self.funding_transaction = Some(funding_transaction);
  
                Ok(msgs::FundingCreated {
                        temporary_channel_id,
@@@ -4496,8 -4470,8 +4500,8 @@@ impl<Signer: Sign> Writeable for Channe
                }
  
                self.funding_tx_confirmed_in.write(writer)?;
 +              self.funding_tx_confirmation_height.write(writer)?;
                self.short_channel_id.write(writer)?;
 -              self.funding_tx_confirmations.write(writer)?;
  
                self.counterparty_dust_limit_satoshis.write(writer)?;
                self.holder_dust_limit_satoshis.write(writer)?;
                }
  
                self.channel_transaction_parameters.write(writer)?;
-               self.counterparty_cur_commitment_point.write(writer)?;
+               self.funding_transaction.write(writer)?;
  
+               self.counterparty_cur_commitment_point.write(writer)?;
                self.counterparty_prev_commitment_point.write(writer)?;
                self.counterparty_node_id.write(writer)?;
  
@@@ -4666,8 -4641,8 +4671,8 @@@ impl<'a, Signer: Sign, K: Deref> Readab
                };
  
                let funding_tx_confirmed_in = Readable::read(reader)?;
 +              let funding_tx_confirmation_height = Readable::read(reader)?;
                let short_channel_id = Readable::read(reader)?;
 -              let funding_tx_confirmations = Readable::read(reader)?;
  
                let counterparty_dust_limit_satoshis = Readable::read(reader)?;
                let holder_dust_limit_satoshis = Readable::read(reader)?;
                };
  
                let channel_parameters = Readable::read(reader)?;
+               let funding_transaction = Readable::read(reader)?;
                let counterparty_cur_commitment_point = Readable::read(reader)?;
  
                let counterparty_prev_commitment_point = Readable::read(reader)?;
                        last_sent_closing_fee,
  
                        funding_tx_confirmed_in,
 +                      funding_tx_confirmation_height,
                        short_channel_id,
 -                      funding_tx_confirmations,
  
                        counterparty_dust_limit_satoshis,
                        holder_dust_limit_satoshis,
                        counterparty_forwarding_info,
  
                        channel_transaction_parameters: channel_parameters,
-                       counterparty_cur_commitment_point,
+                       funding_transaction,
  
+                       counterparty_cur_commitment_point,
                        counterparty_prev_commitment_point,
                        counterparty_node_id,
  
@@@ -4792,14 -4770,14 +4800,14 @@@ mod tests 
        use bitcoin::hashes::hex::FromHex;
        use hex;
        use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
 -      use ln::channel::{Channel,Sign,InboundHTLCOutput,OutboundHTLCOutput,InboundHTLCState,OutboundHTLCState,HTLCOutputInCommitment,HTLCCandidate,HTLCInitiator,TxCreationKeys};
 +      use ln::channel::{Channel,InboundHTLCOutput,OutboundHTLCOutput,InboundHTLCState,OutboundHTLCState,HTLCOutputInCommitment,HTLCCandidate,HTLCInitiator,TxCreationKeys};
        use ln::channel::MAX_FUNDING_SATOSHIS;
        use ln::features::InitFeatures;
        use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
        use ln::chan_utils;
        use ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT};
        use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
 -      use chain::keysinterface::{InMemorySigner, KeysInterface};
 +      use chain::keysinterface::{InMemorySigner, KeysInterface, BaseSign};
        use chain::transaction::OutPoint;
        use util::config::UserConfig;
        use util::enforcing_trait_impls::EnforcingSigner;
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
-               let funding_created_msg = node_a_chan.get_outbound_funding_created(funding_outpoint, &&logger).unwrap();
+               let funding_created_msg = node_a_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap();
                let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, last_block_hash, &&logger).unwrap();
  
                // Node B --> Node A: funding signed
index efb2fe9c37bd9a8ec2c9a36d9a521072a6f8c6f3,2f7c17b58d75736dc688dbf8b586a72ea8634623..e87b221025f189ade4bff5ebde5af8908dfa62d9
@@@ -19,6 -19,7 +19,7 @@@
  //!
  
  use bitcoin::blockdata::block::{Block, BlockHeader};
+ use bitcoin::blockdata::transaction::Transaction;
  use bitcoin::blockdata::constants::genesis_block;
  use bitcoin::network::constants::Network;
  
@@@ -434,7 -435,6 +435,7 @@@ pub struct ChannelManager<Signer: Sign
        #[cfg(not(any(test, feature = "_test_utils")))]
        channel_state: Mutex<ChannelHolder<Signer>>,
        our_network_key: SecretKey,
 +      our_network_pubkey: PublicKey,
  
        /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
        /// value increases strictly since we don't assume access to a time source.
@@@ -823,6 -823,7 +824,6 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
  
                        latest_block_height: AtomicUsize::new(params.latest_height),
                        last_block_hash: RwLock::new(params.latest_hash),
 -                      secp_ctx,
  
                        channel_state: Mutex::new(ChannelHolder{
                                by_id: HashMap::new(),
                                pending_msg_events: Vec::new(),
                        }),
                        our_network_key: keys_manager.get_node_secret(),
 +                      our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
 +                      secp_ctx,
  
                        last_node_announcement_serial: AtomicUsize::new(0),
  
  
        /// Creates a new outbound channel to the given remote node and with the given value.
        ///
-       /// user_id will be provided back as user_channel_id in FundingGenerationReady and
-       /// FundingBroadcastSafe events to allow tracking of which events correspond with which
-       /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
-       /// may wish to avoid using 0 for user_id here.
+       /// user_id will be provided back as user_channel_id in FundingGenerationReady events to allow
+       /// tracking of which events correspond with which create_channel call. Note that the
+       /// user_channel_id defaults to 0 for inbound channels, so you may wish to avoid using 0 for
+       /// user_id here. user_id has no meaning inside of LDK, it is simply copied to events and
+       /// otherwise ignored.
        ///
        /// If successful, will generate a SendOpenChannel message event, so you should probably poll
        /// PeerManager::process_events afterwards.
                }
        }
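As a usage sketch of the user_id semantics documented above (assuming `channel_manager` is an initialized ChannelManager and `counterparty_node_id` is the peer's PublicKey; values are illustrative and mirror the call shape used in the bench module later in this diff):

	// Tag the channel with a nonzero user_id so the later FundingGenerationReady event
	// can be matched back to this create_channel call.
	let user_id: u64 = 42;
	channel_manager.create_channel(counterparty_node_id, 100_000, 1_000, user_id, None)
		.expect("channel open failed");
	// A SendOpenChannel message event is now queued; deliver it by calling
	// PeerManager::process_events (or via your usual event loop).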
  
 -      fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
 +      fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
                let mut chan = {
                        let mut channel_state_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_state_lock;
                        if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
                                if let Some(node_id) = peer_node_id {
                                        if chan.get().get_counterparty_node_id() != *node_id {
 -                                              // Error or Ok here doesn't matter - the result is only exposed publicly
 -                                              // when peer_node_id is None anyway.
 -                                              return Ok(());
 +                                              return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
                                        }
                                }
                                if let Some(short_id) = chan.get().get_short_channel_id() {
                        });
                }
  
 -              Ok(())
 +              Ok(chan.get_counterparty_node_id())
        }
  
        /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
        /// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
        pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 -              self.force_close_channel_with_peer(channel_id, None)
 +              match self.force_close_channel_with_peer(channel_id, None) {
 +                      Ok(counterparty_node_id) => {
 +                              self.channel_state.lock().unwrap().pending_msg_events.push(
 +                                      events::MessageSendEvent::HandleError {
 +                                              node_id: counterparty_node_id,
 +                                              action: msgs::ErrorAction::SendErrorMessage {
 +                                                      msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
 +                                              },
 +                                      }
 +                              );
 +                              Ok(())
 +                      },
 +                      Err(e) => Err(e)
 +              }
        }
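A brief sketch of the caller-visible effect of this change (assuming `channel_manager` and `peer_manager` are wired up as usual and `channel_id` identifies an open channel; this is a fragment, not a complete program):

	// Force-closing now also queues a HandleError message event carrying an
	// ErrorMessage ("Channel force-closed") for the counterparty.
	channel_manager.force_close_channel(&channel_id).expect("unknown channel_id");
	// The queued error message is delivered to the peer on the next event-processing pass.
	peer_manager.process_events();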
  
        /// Force close all channels, immediately broadcasting the latest local commitment transaction
  
        /// Call this upon creation of a funding transaction for the given channel.
        ///
-       /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
-       /// or your counterparty can steal your funds!
+       /// Returns an [`APIError::APIMisuseError`] if the funding_transaction spends non-SegWit outputs
+       /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
        ///
        /// Panics if a funding transaction has already been provided for this channel.
        ///
-       /// May panic if the funding_txo is duplicative with some other channel (note that this should
-       /// be trivially prevented by using unique funding transaction keys per-channel).
-       pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
+       /// May panic if the output found in the funding transaction is duplicative with some other
+       /// channel (note that this should be trivially prevented by using unique funding transaction
+       /// keys per-channel).
+       ///
+       /// Do NOT broadcast the funding transaction yourself. When we have safely received our
+       /// counterparty's signature the funding transaction will automatically be broadcast via the
+       /// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
+       ///
+       /// Note that this includes RBF or similar transaction replacement strategies - lightning does
+       /// not currently support replacing a funding transaction on an existing channel. Instead,
+       /// create a new channel with a conflicting funding transaction.
+       pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
                let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
  
+               for inp in funding_transaction.input.iter() {
+                       if inp.witness.is_empty() {
+                               return Err(APIError::APIMisuseError {
+                                       err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
+                               });
+                       }
+               }
                let (chan, msg) = {
                        let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
                                Some(mut chan) => {
-                                       (chan.get_outbound_funding_created(funding_txo, &self.logger)
+                                       let mut output_index = None;
+                                       let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
+                                       for (idx, outp) in funding_transaction.output.iter().enumerate() {
+                                               if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+                                                       if output_index.is_some() {
+                                                               return Err(APIError::APIMisuseError {
+                                                                       err: "Multiple outputs matched the expected script and value".to_owned()
+                                                               });
+                                                       }
+                                                       if idx > u16::max_value() as usize {
+                                                               return Err(APIError::APIMisuseError {
+                                                                       err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+                                                               });
+                                                       }
+                                                       output_index = Some(idx as u16);
+                                               }
+                                       }
+                                       if output_index.is_none() {
+                                               return Err(APIError::APIMisuseError {
+                                                       err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
+                                               });
+                                       }
+                                       let funding_txo = OutPoint { txid: funding_transaction.txid(), index: output_index.unwrap() };
+                                       (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
                                                .map_err(|e| if let ChannelError::Close(msg) = e {
                                                        MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
                                                } else { unreachable!(); })
                                        , chan)
                                },
-                               None => return
+                               None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
                        };
                        match handle_error!(self, res, chan.get_counterparty_node_id()) {
                                Ok(funding_msg) => {
                                        (chan, funding_msg)
                                },
-                               Err(_) => { return; }
+                               Err(_) => { return Err(APIError::ChannelUnavailable {
+                                       err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+                               }) },
                        }
                };
  
                                e.insert(chan);
                        }
                }
+               Ok(())
        }
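A usage sketch of the new funding flow (assuming the fields of a received FundingGenerationReady event and a fully signed, SegWit-only `funding_tx` built by the user's wallet; the wallet step is elided):

	// funding_tx must contain exactly one output paying `channel_value_satoshis` to the
	// `output_script` from the FundingGenerationReady event. Do NOT broadcast it yourself.
	match channel_manager.funding_transaction_generated(&temporary_channel_id, funding_tx) {
		Ok(()) => {
			// LDK broadcasts the transaction via its BroadcasterInterface once the
			// counterparty's funding_signed arrives.
		},
		Err(e) => {
			// APIMisuseError: a non-SegWit input or no/duplicate matching output.
			// ChannelUnavailable: unknown temporary_channel_id or a signing failure.
			eprintln!("funding_transaction_generated failed: {:?}", e);
		},
	}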
  
        fn get_announcement_sigs(&self, chan: &Channel<Signer>) -> Option<msgs::AnnouncementSignatures> {
  
        /// Gets the node_id held by this ChannelManager
        pub fn get_our_node_id(&self) -> PublicKey {
 -              PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
 +              self.our_network_pubkey.clone()
        }
  
        /// Restores a single, given channel to normal operation after a
                                return;
                        }
  
-                       let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(&self.logger);
+                       let (raa, commitment_update, order, pending_forwards, mut pending_failures, funding_broadcastable, funding_locked) = channel.monitor_updating_restored(&self.logger);
                        if !pending_forwards.is_empty() {
                                htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
                        }
                                        handle_cs!();
                                },
                        }
-                       if needs_broadcast_safe {
-                               pending_events.push(events::Event::FundingBroadcastSafe {
-                                       funding_txo: channel.get_funding_txo().unwrap(),
-                                       user_channel_id: channel.get_user_id(),
-                               });
+                       if let Some(tx) = funding_broadcastable {
+                               self.tx_broadcaster.broadcast_transaction(&tx);
                        }
                        if let Some(msg) = funding_locked {
                                pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
        }
  
        fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-               let (funding_txo, user_id) = {
+               let funding_tx = {
                        let last_block_hash = *self.last_block_hash.read().unwrap();
                        let mut channel_lock = self.channel_state.lock().unwrap();
                        let channel_state = &mut *channel_lock;
                                        if chan.get().get_counterparty_node_id() != *counterparty_node_id {
                                                return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
                                        }
-                                       let monitor = match chan.get_mut().funding_signed(&msg, last_block_hash, &self.logger) {
+                                       let (monitor, funding_tx) = match chan.get_mut().funding_signed(&msg, last_block_hash, &self.logger) {
                                                Ok(update) => update,
                                                Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
                                        };
                                        if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
                                                return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
                                        }
-                                       (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
+                                       funding_tx
                                },
                                hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
                        }
                };
-               let mut pending_events = self.pending_events.lock().unwrap();
-               pending_events.push(events::Event::FundingBroadcastSafe {
-                       funding_txo,
-                       user_channel_id: user_id,
-               });
+               self.tx_broadcaster.broadcast_transaction(&funding_tx);
                Ok(())
        }
  
                                                                        msg: update
                                                                });
                                                        }
 +                                                      pending_msg_events.push(events::MessageSendEvent::HandleError {
 +                                                              node_id: chan.get_counterparty_node_id(),
 +                                                              action: msgs::ErrorAction::SendErrorMessage {
 +                                                                      msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
 +                                                              },
 +                                                      });
                                                }
                                        },
                                }
@@@ -3295,26 -3315,12 +3334,26 @@@ wher
        L::Target: Logger,
  {
        fn block_connected(&self, block: &Block, height: u32) {
 +              assert_eq!(*self.last_block_hash.read().unwrap(), block.header.prev_blockhash,
 +                      "Blocks must be connected in chain-order - the connected header must build on the last connected header");
 +              assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
 +                      "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
                let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
 -              ChannelManager::block_connected(self, &block.header, &txdata, height);
 +              self.transactions_confirmed(&block.header, height, &txdata);
 +              self.update_best_block(&block.header, height);
        }
  
 -      fn block_disconnected(&self, header: &BlockHeader, _height: u32) {
 -              ChannelManager::block_disconnected(self, header);
 +      fn block_disconnected(&self, header: &BlockHeader, height: u32) {
 +              assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
 +                      "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
 +
 +              let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 +              let new_height = self.latest_block_height.fetch_sub(1, Ordering::AcqRel) as u32 - 1;
 +              assert_eq!(new_height, height - 1,
 +                      "Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
 +              *self.last_block_hash.write().unwrap() = header.prev_blockhash;
 +
 +              self.do_chain_event(new_height, |channel| channel.update_best_block(new_height, header.time));
        }
  }
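For illustration, a one-block reorg driven through the chain::Listen interface looks roughly like this (assuming `old_tip_header` was the previous best header at `height` and `new_block` replaces it; sketch only):

	// Assumes `use lightning::chain::Listen;` and that `channel_manager` is held by value here.
	// Blocks must be disconnected in chain-order back to the fork point...
	Listen::block_disconnected(&channel_manager, &old_tip_header, height);
	// ...and then reconnected in order along the new best chain.
	Listen::block_connected(&channel_manager, &new_block, height);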
  
@@@ -3325,11 -3331,22 +3364,11 @@@ impl<Signer: Sign, M: Deref, T: Deref, 
          F::Target: FeeEstimator,
          L::Target: Logger,
  {
 -      /// Updates channel state based on transactions seen in a connected block.
 -      pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
 +      fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
 +                      (&self, height: u32, f: FN) {
                // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
                // during initialization prior to the chain_monitor being fully configured in some cases.
                // See the docs for `ChannelManagerReadArgs` for more.
 -              let block_hash = header.block_hash();
 -              log_trace!(self.logger, "Block {} at height {} connected", block_hash, height);
 -
 -              let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 -
 -              assert_eq!(*self.last_block_hash.read().unwrap(), header.prev_blockhash,
 -                      "Blocks must be connected in chain-order - the connected header must build on the last connected header");
 -              assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
 -                      "Blocks must be connected in chain-order - the connected header must build on the last connected header");
 -              self.latest_block_height.store(height as usize, Ordering::Release);
 -              *self.last_block_hash.write().unwrap() = block_hash;
  
                let mut failed_channels = Vec::new();
                let mut timed_out_htlcs = Vec::new();
                        let short_to_id = &mut channel_state.short_to_id;
                        let pending_msg_events = &mut channel_state.pending_msg_events;
                        channel_state.by_id.retain(|_, channel| {
 -                              let res = channel.block_connected(header, txdata, height);
 +                              let res = f(channel);
                                if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
                                        for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
                                                let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
                                                short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
                                        }
                                } else if let Err(e) = res {
 +                                      if let Some(short_id) = channel.get_short_channel_id() {
 +                                              short_to_id.remove(&short_id);
 +                                      }
 +                                      // It looks like our counterparty went on-chain or the funding transaction was
 +                                      // reorged out of the main chain. Close the channel.
 +                                      failed_channels.push(channel.force_shutdown(true));
 +                                      if let Ok(update) = self.get_channel_update(&channel) {
 +                                              pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 +                                                      msg: update
 +                                              });
 +                                      }
                                        pending_msg_events.push(events::MessageSendEvent::HandleError {
                                                node_id: channel.get_counterparty_node_id(),
                                                action: msgs::ErrorAction::SendErrorMessage { msg: e },
                                        });
                                        return false;
                                }
 -                              if let Some(funding_txo) = channel.get_funding_txo() {
 -                                      for &(_, tx) in txdata.iter() {
 -                                              for inp in tx.input.iter() {
 -                                                      if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
 -                                                              log_trace!(self.logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
 -                                                              if let Some(short_id) = channel.get_short_channel_id() {
 -                                                                      short_to_id.remove(&short_id);
 -                                                              }
 -                                                              // It looks like our counterparty went on-chain. Close the channel.
 -                                                              failed_channels.push(channel.force_shutdown(true));
 -                                                              if let Ok(update) = self.get_channel_update(&channel) {
 -                                                                      pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                                                                              msg: update
 -                                                                      });
 -                                                              }
 -                                                              return false;
 -                                                      }
 -                                              }
 -                                      }
 -                              }
                                true
                        });
  
                for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
                        self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
                }
 +      }
 +
 +      /// Updates channel state to take note of transactions which were confirmed in the given block
 +      /// at the given height.
 +      ///
 +      /// Note that you must still call (or have called) [`update_best_block`] with the block
 +      /// information which is included here.
 +      ///
 +      /// This method may be called before or after [`update_best_block`] for a given block's
 +      /// transaction data and may be called multiple times with additional transaction data for a
 +      /// given block.
 +      ///
 +      /// This method may be called for a previous block after an [`update_best_block`] call has
 +      /// been made for a later block, however it must *not* be called with transaction data from a
 +      /// block which is no longer in the best chain (ie where [`update_best_block`] has already
 +      /// been informed about a blockchain reorganization which no longer includes the block which
 +      /// corresponds to `header`).
 +      ///
 +      /// [`update_best_block`]: `Self::update_best_block`
 +      pub fn transactions_confirmed(&self, header: &BlockHeader, height: u32, txdata: &TransactionData) {
 +              // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 +              // during initialization prior to the chain_monitor being fully configured in some cases.
 +              // See the docs for `ChannelManagerReadArgs` for more.
 +
 +              let block_hash = header.block_hash();
 +              log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
 +
 +              let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 +              self.do_chain_event(height, |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
 +      }
 +
 +      /// Updates channel state with the current best blockchain tip. You should attempt to call this
 +      /// quickly after a new block becomes available, however if multiple new blocks become
 +      /// available at the same time, only a single `update_best_block()` call needs to be made.
 +      ///
 +      /// This method should also be called immediately after any block disconnections, once at the
 +      /// reorganization fork point, and once with the new chain tip. Calling this method at the
 +      /// blockchain reorganization fork point ensures we learn when a funding transaction which was
 +      /// previously confirmed is reorganized out of the blockchain, ensuring we do not continue to
 +      /// accept payments which cannot be enforced on-chain.
 +      ///
 +      /// In both the block-connection and block-disconnection case, this method may be called either
 +      /// once per block connected or disconnected, or simply at the fork point and new tip(s),
 +      /// skipping any intermediary blocks.
 +      pub fn update_best_block(&self, header: &BlockHeader, height: u32) {
 +              // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 +              // during initialization prior to the chain_monitor being fully configured in some cases.
 +              // See the docs for `ChannelManagerReadArgs` for more.
 +
 +              let block_hash = header.block_hash();
 +              log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
 +
 +              let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 +
 +              self.latest_block_height.store(height as usize, Ordering::Release);
 +              *self.last_block_hash.write().unwrap() = block_hash;
 +
 +              self.do_chain_event(height, |channel| channel.update_best_block(height, header.time));
  
                loop {
                        // Update last_node_announcement_serial to be the max of its current value and the
                }
        }
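As a sketch of driving these two new methods from a custom block source (assuming a `block: Block` and its `height` are known; this mirrors the Listen implementation above):

	let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
	// For a given block the two calls may be made in either order, and
	// transactions_confirmed may be called repeatedly with additional transaction data.
	channel_manager.transactions_confirmed(&block.header, height, &txdata);
	channel_manager.update_best_block(&block.header, height);
	// On a reorg, call update_best_block once at the fork point and once at the new tip;
	// intermediary blocks may be skipped.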
  
 -      /// Updates channel state based on a disconnected block.
 -      ///
 -      /// If necessary, the channel may be force-closed without letting the counterparty participate
 -      /// in the shutdown.
 -      pub fn block_disconnected(&self, header: &BlockHeader) {
 -              // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
 -              // during initialization prior to the chain_monitor being fully configured in some cases.
 -              // See the docs for `ChannelManagerReadArgs` for more.
 -              let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
 -
 -              assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
 -                      "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
 -              self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
 -              *self.last_block_hash.write().unwrap() = header.prev_blockhash;
 -
 -              let mut failed_channels = Vec::new();
 -              {
 -                      let mut channel_lock = self.channel_state.lock().unwrap();
 -                      let channel_state = &mut *channel_lock;
 -                      let short_to_id = &mut channel_state.short_to_id;
 -                      let pending_msg_events = &mut channel_state.pending_msg_events;
 -                      channel_state.by_id.retain(|_,  v| {
 -                              if v.block_disconnected(header) {
 -                                      if let Some(short_id) = v.get_short_channel_id() {
 -                                              short_to_id.remove(&short_id);
 -                                      }
 -                                      failed_channels.push(v.force_shutdown(true));
 -                                      if let Ok(update) = self.get_channel_update(&v) {
 -                                              pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
 -                                                      msg: update
 -                                              });
 -                                      }
 -                                      false
 -                              } else {
 -                                      true
 -                              }
 -                      });
 -              }
 -
 -              self.handle_init_event_channel_failures(failed_channels);
 -      }
 -
        /// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
        /// indicating whether persistence is necessary. Only one listener on
        /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
@@@ -4347,6 -4357,7 +4386,6 @@@ impl<'a, Signer: Sign, M: Deref, T: Der
  
                        latest_block_height: AtomicUsize::new(latest_block_height as usize),
                        last_block_hash: RwLock::new(last_block_hash),
 -                      secp_ctx,
  
                        channel_state: Mutex::new(ChannelHolder {
                                by_id,
                                pending_msg_events: Vec::new(),
                        }),
                        our_network_key: args.keys_manager.get_node_secret(),
 +                      our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
 +                      secp_ctx,
  
                        last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
  
@@@ -4434,154 -4443,3 +4473,154 @@@ mod tests 
                }
        }
  }
 +
 +#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
 +pub mod bench {
 +      use chain::Listen;
 +      use chain::chainmonitor::ChainMonitor;
 +      use chain::channelmonitor::Persist;
 +      use chain::keysinterface::{KeysManager, InMemorySigner};
 +      use chain::transaction::OutPoint;
 +      use ln::channelmanager::{ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
 +      use ln::features::InitFeatures;
 +      use ln::functional_test_utils::*;
 +      use ln::msgs::ChannelMessageHandler;
 +      use routing::network_graph::NetworkGraph;
 +      use routing::router::get_route;
 +      use util::test_utils;
 +      use util::config::UserConfig;
 +      use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 +
 +      use bitcoin::hashes::Hash;
 +      use bitcoin::hashes::sha256::Hash as Sha256;
 +      use bitcoin::{Block, BlockHeader, Transaction, TxOut};
 +
 +      use std::sync::Mutex;
 +
 +      use test::Bencher;
 +
 +      struct NodeHolder<'a, P: Persist<InMemorySigner>> {
 +              node: &'a ChannelManager<InMemorySigner,
 +                      &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
 +                              &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
 +                              &'a test_utils::TestLogger, &'a P>,
 +                      &'a test_utils::TestBroadcaster, &'a KeysManager,
 +                      &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>
 +      }
 +
 +      #[cfg(test)]
 +      #[bench]
 +      fn bench_sends(bench: &mut Bencher) {
 +              bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
 +      }
 +
 +      pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
 +              // Do a simple benchmark of sending a payment back and forth between two nodes.
 +              // Note that this is unrealistic as each payment send will require at least two fsync
 +              // calls per node.
 +              let network = bitcoin::Network::Testnet;
 +              let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
 +
 +              let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
 +              let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
 +
 +              let mut config: UserConfig = Default::default();
 +              config.own_channel_config.minimum_depth = 1;
 +
 +              let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
 +              let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
 +              let seed_a = [1u8; 32];
 +              let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
 +              let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
 +                      network,
 +                      latest_hash: genesis_hash,
 +                      latest_height: 0,
 +              });
 +              let node_a_holder = NodeHolder { node: &node_a };
 +
 +              let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
 +              let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
 +              let seed_b = [2u8; 32];
 +              let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
 +              let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
 +                      network,
 +                      latest_hash: genesis_hash,
 +                      latest_height: 0,
 +              });
 +              let node_b_holder = NodeHolder { node: &node_b };
 +
 +              node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
 +              node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
 +              node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
 +
 +              let tx;
 +              if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
 +                      tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
 +                              value: 8_000_000, script_pubkey: output_script,
 +                      }]};
 +                      node_a.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
 +              } else { panic!(); }
 +
 +              node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
 +              node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
 +
 +              assert_eq!(tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
 +
 +              let block = Block {
 +                      header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
 +                      txdata: vec![tx],
 +              };
 +              Listen::block_connected(&node_a, &block, 1);
 +              Listen::block_connected(&node_b, &block, 1);
 +
 +              node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
 +              node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
 +
 +              let dummy_graph = NetworkGraph::new(genesis_hash);
 +
 +              macro_rules! send_payment {
 +                      ($node_a: expr, $node_b: expr) => {
 +                              let usable_channels = $node_a.list_usable_channels();
 +                              let route = get_route(&$node_a.get_our_node_id(), &dummy_graph, &$node_b.get_our_node_id(), None, Some(&usable_channels.iter().map(|r| r).collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a).unwrap();
 +
 +                              let payment_preimage = PaymentPreimage([0; 32]);
 +                              let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
 +
 +                              $node_a.send_payment(&route, payment_hash, &None).unwrap();
 +                              let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
 +                              $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
 +                              $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
 +                              let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
 +                              $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
 +                              $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
 +                              $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
 +
 +                              expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
 +                              expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, 10_000);
 +                              assert!($node_b.claim_funds(payment_preimage, &None, 10_000));
 +
 +                              match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
 +                                      MessageSendEvent::UpdateHTLCs { node_id, updates } => {
 +                                              assert_eq!(node_id, $node_a.get_our_node_id());
 +                                              $node_a.handle_update_fulfill_htlc(&$node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 +                                              $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &updates.commitment_signed);
 +                                      },
 +                                      _ => panic!("Failed to generate claim event"),
 +                              }
 +
 +                              let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
 +                              $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
 +                              $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
 +                              $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
 +
 +                              expect_payment_sent!(NodeHolder { node: &$node_a }, payment_preimage);
 +                      }
 +              }
 +
 +              bench.iter(|| {
 +                      send_payment!(node_a, node_b);
 +                      send_payment!(node_b, node_a);
 +              });
 +      }
 +}
index af11ef5d86baf01c98d0d815a753ac23ffc3535b,2ebdac5d031f4a00b0efe2df418744a3b54199b9..da3e19c04e09afdf85b5878e5322ddd84d2aff3f
@@@ -10,7 -10,7 +10,7 @@@
  //! A bunch of useful utilities for building networks of nodes and exchanging messages between
  //! nodes for functional tests.
  
 -use chain::Watch;
 +use chain::{Listen, Watch};
  use chain::channelmonitor::ChannelMonitor;
  use chain::transaction::OutPoint;
  use ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure};
@@@ -60,15 -60,21 +60,15 @@@ pub fn mine_transaction<'a, 'b, 'c, 'd>
  /// Mine the given transaction at the given height, mining blocks as required to build to that
  /// height
  pub fn confirm_transaction_at<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, tx: &Transaction, conf_height: u32) {
 -      let starting_block = node.best_block_info();
 +      let first_connect_height = node.best_block_info().1 + 1;
 +      assert!(first_connect_height <= conf_height);
 +      if conf_height - first_connect_height >= 1 {
 +              connect_blocks(node, conf_height - first_connect_height);
 +      }
        let mut block = Block {
 -              header: BlockHeader { version: 0x20000000, prev_blockhash: starting_block.0, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
 +              header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
                txdata: Vec::new(),
        };
 -      let height = starting_block.1 + 1;
 -      assert!(height <= conf_height);
 -      for _ in height..conf_height {
 -              connect_block(node, &block);
 -              block = Block {
 -                      header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
 -                      txdata: vec![],
 -              };
 -      }
 -
        for _ in 0..*node.network_chan_count.borrow() { // Make sure we don't end up with channels at the same short id by offsetting by chan_count
                block.txdata.push(Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() });
        }
        connect_block(node, &block);
  }
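For example (a sketch using the helpers above, assuming the node's tip is currently below height 10):

	// Builds empty blocks as needed, then mines a block containing `tx` at height 10.
	confirm_transaction_at(&nodes[1], &tx, 10);
	// Bury the confirmation under five further blocks.
	connect_blocks(&nodes[1], 5);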
  
 +/// The possible ways we may notify a ChannelManager of a new block
 +pub enum ConnectStyle {
 +      /// Calls update_best_block first, detecting transactions in the block only after receiving the
 +      /// header and height information.
 +      BestBlockFirst,
 +      /// The same as BestBlockFirst, however when we have multiple blocks to connect, we only
 +      /// make a single update_best_block call.
 +      BestBlockFirstSkippingBlocks,
 +      /// Calls transactions_confirmed first, detecting transactions in the block before updating the
 +      /// header and height information.
 +      TransactionsFirst,
 +      /// The same as TransactionsFirst, however when we have multiple blocks to connect, we only
 +      /// make a single update_best_block call.
 +      TransactionsFirstSkippingBlocks,
 +      /// Provides the full block via the chain::Listen interface. In the current code this is
 +      /// equivalent to TransactionsFirst with some additional assertions.
 +      FullBlockViaListen,
 +}
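A sketch of how a test opts into one of these styles (assuming `nodes` was built by create_network, which defaults to FullBlockViaListen below):

	// Switch node 0 to the transactions-first, block-skipping notification style
	// before connecting a batch of blocks.
	*nodes[0].connect_style.borrow_mut() = ConnectStyle::TransactionsFirstSkippingBlocks;
	connect_blocks(&nodes[0], 6);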
 +
  pub fn connect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, depth: u32) -> BlockHash {
 +      let skip_intermediaries = match *node.connect_style.borrow() {
 +              ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => true,
 +              _ => false,
 +      };
 +
        let mut block = Block {
                header: BlockHeader { version: 0x20000000, prev_blockhash: node.best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
                txdata: vec![],
        };
 -      connect_block(node, &block);
 -      for _ in 2..depth + 1 {
 +      assert!(depth >= 1);
 +      for _ in 0..depth - 1 {
 +              do_connect_block(node, &block, skip_intermediaries);
                block = Block {
                        header: BlockHeader { version: 0x20000000, prev_blockhash: block.header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
                        txdata: vec![],
                };
 -              connect_block(node, &block);
        }
 +      connect_block(node, &block);
        block.header.block_hash()
  }
  
  pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) {
 +      do_connect_block(node, block, false);
 +}
 +
 +fn do_connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block, skip_manager: bool) {
        let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
        let height = node.best_block_info().1 + 1;
        node.chain_monitor.chain_monitor.block_connected(&block.header, &txdata, height);
 -      node.node.block_connected(&block.header, &txdata, height);
 +      if !skip_manager {
 +              match *node.connect_style.borrow() {
 +                      ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstSkippingBlocks => {
 +                              node.node.update_best_block(&block.header, height);
 +                              node.node.transactions_confirmed(&block.header, height, &block.txdata.iter().enumerate().collect::<Vec<_>>());
 +                      },
 +                      ConnectStyle::TransactionsFirst|ConnectStyle::TransactionsFirstSkippingBlocks => {
 +                              node.node.transactions_confirmed(&block.header, height, &block.txdata.iter().enumerate().collect::<Vec<_>>());
 +                              node.node.update_best_block(&block.header, height);
 +                      },
 +                      ConnectStyle::FullBlockViaListen => {
 +                              Listen::block_connected(node.node, &block, height);
 +                      }
 +              }
 +      }
        node.node.test_process_background_events();
        node.blocks.borrow_mut().push((block.header, height));
  }
  
  pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) {
 -      for _ in 0..count {
 +      for i in 0..count {
                let orig_header = node.blocks.borrow_mut().pop().unwrap();
                assert!(orig_header.1 > 0); // Cannot disconnect genesis
 +              let prev_header = node.blocks.borrow().last().unwrap().clone();
 +
                node.chain_monitor.chain_monitor.block_disconnected(&orig_header.0, orig_header.1);
 -              node.node.block_disconnected(&orig_header.0);
 +              match *node.connect_style.borrow() {
 +                      ConnectStyle::FullBlockViaListen => {
 +                              Listen::block_disconnected(node.node, &orig_header.0, orig_header.1);
 +                      },
 +                      ConnectStyle::BestBlockFirstSkippingBlocks|ConnectStyle::TransactionsFirstSkippingBlocks => {
 +                              if i == count - 1 {
 +                                      node.node.update_best_block(&prev_header.0, prev_header.1);
 +                              }
 +                      },
 +                      _ => {
 +                              node.node.update_best_block(&prev_header.0, prev_header.1);
 +                      },
 +              }
        }
  }
  
@@@ -203,7 -152,6 +203,7 @@@ pub struct Node<'a, 'b: 'a, 'c: 'b> 
        pub network_chan_count: Rc<RefCell<u32>>,
        pub logger: &'c test_utils::TestLogger,
        pub blocks: RefCell<Vec<(BlockHeader, u32)>>,
 +      pub connect_style: Rc<RefCell<ConnectStyle>>,
  }
  impl<'a, 'b, 'c> Node<'a, 'b, 'c> {
        pub fn best_block_hash(&self) -> BlockHash {
@@@ -365,24 -313,6 +365,24 @@@ macro_rules! get_event_msg 
        }
  }
  
 +/// Get a specific event from the pending events queue.
 +#[macro_export]
 +macro_rules! get_event {
 +      ($node: expr, $event_type: path) => {
 +              {
 +                      let mut events = $node.node.get_and_clear_pending_events();
 +                      assert_eq!(events.len(), 1);
 +                      let ev = events.pop().unwrap();
 +                      match ev {
 +                              $event_type { .. } => {
 +                                      ev
 +                              },
 +                              _ => panic!("Unexpected event"),
 +                      }
 +              }
 +      }
 +}
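For example (sketch; `node` is a test Node or NodeHolder): pull the single pending FundingGenerationReady event and destructure it:

	if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } =
		get_event!(node, Event::FundingGenerationReady)
	{
		// Build the funding transaction paying `output_script` here.
		let _ = (temporary_channel_id, output_script);
	}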
 +
  #[cfg(test)]
  macro_rules! get_htlc_update_msgs {
        ($node: expr, $node_id: expr) => {
@@@ -411,8 -341,7 +411,8 @@@ macro_rules! get_feerate 
        }
  }
  
 -#[cfg(test)]
 +/// Returns any local commitment transactions for the channel.
 +#[macro_export]
  macro_rules! get_local_commitment_txn {
        ($node: expr, $channel_id: expr) => {
                {
@@@ -492,7 -421,7 +492,7 @@@ pub fn create_chan_between_nodes_with_v
  
        let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42);
  
-       node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
+       node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
        check_added_monitors!(node_a, 0);
  
        node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
        }
  
        let events_4 = node_a.node.get_and_clear_pending_events();
-       assert_eq!(events_4.len(), 1);
-       match events_4[0] {
-               Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
-                       assert_eq!(user_channel_id, 42);
-                       assert_eq!(*funding_txo, funding_output);
-               },
-               _ => panic!("Unexpected event"),
-       };
+       assert_eq!(events_4.len(), 0);
+       assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+       assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);
+       node_a.tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
  
        tx
  }
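
With this change the FundingBroadcastSafe event is gone: funding_transaction_generated now takes the full funding transaction, and once the counterparty's funding_signed has been handled the ChannelManager hands that transaction to the BroadcasterInterface itself. The hunk above checks exactly that; condensed to its essentials, the flow looks roughly like the outline below (a sketch of what this test exercises, not a complete example):

	let (temporary_channel_id, tx, _funding_output) =
		create_funding_transaction(node_a, channel_value, 42);
	// The full transaction is handed over up front instead of just the outpoint.
	node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
	// ... exchange funding_created / funding_signed with node_b ...
	// No FundingBroadcastSafe event is surfaced; the node broadcasts the
	// transaction on its own.
	assert!(node_a.node.get_and_clear_pending_events().is_empty());
	assert_eq!(node_a.tx_broadcaster.txn_broadcasted.lock().unwrap()[0], tx);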
@@@ -918,7 -844,7 +915,7 @@@ macro_rules! expect_pending_htlcs_forwa
        }}
  }
  
 -#[cfg(test)]
 +#[cfg(any(test, feature = "unstable"))]
  macro_rules! expect_payment_received {
        ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
                let events = $node.node.get_and_clear_pending_events();
@@@ -1295,7 -1221,6 +1292,7 @@@ pub fn create_network<'a, 'b: 'a, 'c: '
        let mut nodes = Vec::new();
        let chan_count = Rc::new(RefCell::new(0));
        let payment_count = Rc::new(RefCell::new(0));
 +      let connect_style = Rc::new(RefCell::new(ConnectStyle::FullBlockViaListen));
  
        for i in 0..node_count {
                let net_graph_msg_handler = NetGraphMsgHandler::new(cfgs[i].chain_source.genesis_hash, None, cfgs[i].logger);
                                 keys_manager: &cfgs[i].keys_manager, node: &chan_mgrs[i], net_graph_msg_handler,
                                 node_seed: cfgs[i].node_seed, network_chan_count: chan_count.clone(),
                                 network_payment_count: payment_count.clone(), logger: cfgs[i].logger,
 -                               blocks: RefCell::new(vec![(genesis_block(Network::Testnet).header, 0)])
 +                               blocks: RefCell::new(vec![(genesis_block(Network::Testnet).header, 0)]),
 +                               connect_style: Rc::clone(&connect_style),
                })
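Every Node built here receives a clone of the same Rc<RefCell<ConnectStyle>>, defaulting to FullBlockViaListen. A test can therefore switch the whole network to a different block-connection style after create_network returns, which is exactly what do_test_1_conf_open does further down:

	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	// One shared RefCell, so this affects every node in the test.
	*nodes[0].connect_style.borrow_mut() = connect_style;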
        }
  
@@@ -1418,36 -1342,22 +1415,36 @@@ pub fn check_preimage_claim<'a, 'b, 'c>
  
  pub fn get_announce_close_broadcast_events<'a, 'b, 'c>(nodes: &Vec<Node<'a, 'b, 'c>>, a: usize, b: usize)  {
        let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
 -      assert_eq!(events_1.len(), 1);
 +      assert_eq!(events_1.len(), 2);
        let as_update = match events_1[0] {
                MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                        msg.clone()
                },
                _ => panic!("Unexpected event"),
        };
 +      match events_1[1] {
 +              MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
 +                      assert_eq!(node_id, nodes[b].node.get_our_node_id());
 +                      assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
  
        let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
 -      assert_eq!(events_2.len(), 1);
 +      assert_eq!(events_2.len(), 2);
        let bs_update = match events_2[0] {
                MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                        msg.clone()
                },
                _ => panic!("Unexpected event"),
        };
 +      match events_2[1] {
 +              MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
 +                      assert_eq!(node_id, nodes[a].node.get_our_node_id());
 +                      assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
  
        for node in nodes {
                node.net_graph_msg_handler.handle_channel_update(&as_update).unwrap();
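
The same two-event pattern recurs throughout the functional tests below: call sites where the close is driven by seeing a commitment or closing transaction on chain switch from check_closed_broadcast!(node, false) to check_closed_broadcast!(node, true), because such a close now queues an outgoing error message alongside the BroadcastChannelUpdate, while closes triggered by a peer disconnect or by a received error message keep the single-event form. The macro body is not part of this diff; presumably the boolean simply selects between the one- and two-event cases, roughly along these lines (an approximation based on the assertions in get_announce_close_broadcast_events above, with node and with_error_msg standing in for the macro arguments):

	let msg_events = node.node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), if with_error_msg { 2 } else { 1 });
	match msg_events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("Unexpected event"),
	}
	if with_error_msg {
		match msg_events[1] {
			MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { .. }, .. } => {},
			_ => panic!("Unexpected event"),
		}
	}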
index 0d039bc56c70304155e9d5668dd4baf641b11c92,7c198e4803b97c7a5f1b3dbc5dad3705cbeadd97..22c0af4070b1b2fe25444e631347f118914bc46a
@@@ -16,7 -16,7 +16,7 @@@ use chain::Watch
  use chain::channelmonitor;
  use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
  use chain::transaction::OutPoint;
 -use chain::keysinterface::{Sign, KeysInterface};
 +use chain::keysinterface::{KeysInterface, BaseSign};
  use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
  use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure, BREAKDOWN_TIMEOUT};
  use ln::channel::{Channel, ChannelError};
@@@ -394,7 -394,8 +394,7 @@@ fn test_multi_flight_update_fee() 
        check_added_monitors!(nodes[1], 1);
  }
  
 -#[test]
 -fn test_1_conf_open() {
 +fn do_test_1_conf_open(connect_style: ConnectStyle) {
        // Previously, if the minimum_depth config was set to 1, we'd never send a funding_locked. This
        // tests that we properly send one in that case.
        let mut alice_config = UserConfig::default();
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(alice_config), Some(bob_config)]);
 -      let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +      let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 +      *nodes[0].connect_style.borrow_mut() = connect_style;
  
        let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
        mine_transaction(&nodes[1], &tx);
                node.net_graph_msg_handler.handle_channel_update(&bs_update).unwrap();
        }
  }
 +#[test]
 +fn test_1_conf_open() {
 +      do_test_1_conf_open(ConnectStyle::BestBlockFirst);
 +      do_test_1_conf_open(ConnectStyle::TransactionsFirst);
 +      do_test_1_conf_open(ConnectStyle::FullBlockViaListen);
 +}
  
  fn do_test_sanity_on_in_flight_opens(steps: u8) {
        // Previously, we had issues deserializing channels when we hadn't connected the first block
        let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
  
        if steps & 0x0f == 3 { return; }
-       nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);
        let funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
  
        }
  
        let events_4 = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events_4.len(), 1);
-       match events_4[0] {
-               Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
-                       assert_eq!(user_channel_id, 42);
-                       assert_eq!(*funding_txo, funding_output);
-               },
-               _ => panic!("Unexpected event"),
-       };
+       assert_eq!(events_4.len(), 0);
  
        if steps & 0x0f == 6 { return; }
        create_chan_between_nodes_with_value_confirm_first(&nodes[0], &nodes[1], &tx, 2);
@@@ -1514,14 -1501,10 +1507,14 @@@ fn test_duplicate_htlc_different_direct
        check_spends!(htlc_pair.1, remote_txn[0]);
  
        let events = nodes[0].node.get_and_clear_pending_msg_events();
 -      assert_eq!(events.len(), 2);
 +      assert_eq!(events.len(), 3);
        for e in events {
                match e {
                        MessageSendEvent::BroadcastChannelUpdate { .. } => {},
 +                      MessageSendEvent::HandleError { node_id, action: msgs::ErrorAction::SendErrorMessage { ref msg } } => {
 +                              assert_eq!(node_id, nodes[1].node.get_our_node_id());
 +                              assert_eq!(msg.data, "Commitment or closing transaction was confirmed on chain.");
 +                      },
                        MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
                                assert!(update_add_htlcs.is_empty());
                                assert!(update_fail_htlcs.is_empty());
@@@ -2344,7 -2327,6 +2337,7 @@@ fn channel_monitor_network_test() 
        // Simple case with no pending HTLCs:
        nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
        check_added_monitors!(nodes[1], 1);
 +      check_closed_broadcast!(nodes[1], false);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
                assert_eq!(node_txn.len(), 1);
                check_added_monitors!(nodes[0], 1);
                test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
        }
 -      get_announce_close_broadcast_events(&nodes, 0, 1);
 +      check_closed_broadcast!(nodes[0], true);
        assert_eq!(nodes[0].node.list_channels().len(), 0);
        assert_eq!(nodes[1].node.list_channels().len(), 1);
  
  
        // Simple case of one pending HTLC to HTLC-Timeout
        nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
 +      check_closed_broadcast!(nodes[1], false);
        check_added_monitors!(nodes[1], 1);
        {
                let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
                check_added_monitors!(nodes[2], 1);
                test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
        }
 -      get_announce_close_broadcast_events(&nodes, 1, 2);
 +      check_closed_broadcast!(nodes[2], true);
        assert_eq!(nodes[1].node.list_channels().len(), 0);
        assert_eq!(nodes[2].node.list_channels().len(), 1);
  
        // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
        nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
        check_added_monitors!(nodes[2], 1);
 +      check_closed_broadcast!(nodes[2], false);
        let node2_commitment_txid;
        {
                let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
                check_added_monitors!(nodes[3], 1);
                check_preimage_claim(&nodes[3], &node_txn);
        }
 -      get_announce_close_broadcast_events(&nodes, 2, 3);
 +      check_closed_broadcast!(nodes[3], true);
        assert_eq!(nodes[2].node.list_channels().len(), 0);
        assert_eq!(nodes[3].node.list_channels().len(), 1);
  
        let (close_chan_update_1, close_chan_update_2) = {
                connect_blocks(&nodes[3], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
                let events = nodes[3].node.get_and_clear_pending_msg_events();
 -              assert_eq!(events.len(), 1);
 +              assert_eq!(events.len(), 2);
                let close_chan_update_1 = match events[0] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                };
 +              match events[1] {
 +                      MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
 +                              assert_eq!(node_id, nodes[4].node.get_our_node_id());
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
                check_added_monitors!(nodes[3], 1);
  
                // Clear bumped claiming txn spending node 2's commitment tx. Bumped txn are only generated once a height-based timer has elapsed.
  
                connect_blocks(&nodes[4], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2);
                let events = nodes[4].node.get_and_clear_pending_msg_events();
 -              assert_eq!(events.len(), 1);
 +              assert_eq!(events.len(), 2);
                let close_chan_update_2 = match events[0] {
                        MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
                                msg.clone()
                        },
                        _ => panic!("Unexpected event"),
                };
 +              match events[1] {
 +                      MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id } => {
 +                              assert_eq!(node_id, nodes[3].node.get_our_node_id());
 +                      },
 +                      _ => panic!("Unexpected event"),
 +              }
                check_added_monitors!(nodes[4], 1);
                test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
  
@@@ -2810,7 -2778,7 +2803,7 @@@ fn test_htlc_on_chain_success() 
        assert_eq!(updates.update_fulfill_htlcs.len(), 1);
  
        mine_transaction(&nodes[2], &commitment_tx[0]);
 -      check_closed_broadcast!(nodes[2], false);
 +      check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx, 2*htlc-success tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
        assert_eq!(node_txn.len(), 5);
                assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
                added_monitors.clear();
        }
 -      assert_eq!(events.len(), 2);
 +      assert_eq!(events.len(), 3);
        match events[0] {
                MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
        }
        match events[1] {
 +              MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +
 +      match events[2] {
                MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
                        assert!(update_add_htlcs.is_empty());
                        assert!(update_fail_htlcs.is_empty());
        let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
        check_spends!(commitment_tx[0], chan_1.3);
        mine_transaction(&nodes[1], &commitment_tx[0]);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
       let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 3 (commitment tx + HTLC-Success * 2), ChannelMonitor : 1 (HTLC-Success)
        assert_eq!(node_txn.len(), 4);
        // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
        connect_block(&nodes[0], &Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] });
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let events = nodes[0].node.get_and_clear_pending_events();
        assert_eq!(events.len(), 2);
@@@ -2994,7 -2957,7 +2987,7 @@@ fn test_htlc_on_chain_timeout() 
                _ => panic!("Unexpected event"),
        };
        mine_transaction(&nodes[2], &commitment_tx[0]);
 -      check_closed_broadcast!(nodes[2], false);
 +      check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
        assert_eq!(node_txn.len(), 1);
  
        mine_transaction(&nodes[1], &timeout_tx);
        check_added_monitors!(nodes[1], 1);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        {
                // B will rebroadcast a fee-bumped timeout transaction here.
                let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
  
        mine_transaction(&nodes[0], &commitment_tx[0]);
  
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 timeout tx
        assert_eq!(node_txn.len(), 3);
@@@ -3100,7 -3063,7 +3093,7 @@@ fn test_simple_commitment_revoked_fail_
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
        check_added_monitors!(nodes[1], 1);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
  
        expect_pending_htlcs_forwardable!(nodes[1]);
        check_added_monitors!(nodes[1], 1);
@@@ -3271,18 -3234,11 +3264,18 @@@ fn do_test_commitment_revoked_fail_back
        check_added_monitors!(nodes[1], 1);
  
        let events = nodes[1].node.get_and_clear_pending_msg_events();
 -      assert_eq!(events.len(), if deliver_bs_raa { 3 } else { 2 });
 +      assert_eq!(events.len(), if deliver_bs_raa { 4 } else { 3 });
        match events[if deliver_bs_raa { 1 } else { 0 }] {
                MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
                _ => panic!("Unexpected event"),
        }
 +      match events[if deliver_bs_raa { 2 } else { 1 }] {
 +              MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage { channel_id, ref data } }, node_id: _ } => {
 +                      assert_eq!(channel_id, chan_2.2);
 +                      assert_eq!(data.as_str(), "Commitment or closing transaction was confirmed on chain.");
 +              },
 +              _ => panic!("Unexpected event"),
 +      }
        if deliver_bs_raa {
                match events[0] {
                        MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
                        _ => panic!("Unexpected event"),
                }
        }
 -      match events[if deliver_bs_raa { 2 } else { 1 }] {
 +      match events[if deliver_bs_raa { 3 } else { 2 }] {
                MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
                        assert!(update_add_htlcs.is_empty());
                        assert_eq!(update_fail_htlcs.len(), 3);
@@@ -3444,7 -3400,7 +3437,7 @@@ fn test_htlc_ignore_latest_remote_commi
  
        route_payment(&nodes[0], &[&nodes[1]], 10000000);
        nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
  
        let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[0].clone(), node_txn[1].clone()]});
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  
        // Duplicate the connect_block call since this may happen due to other listeners
@@@ -3506,7 -3462,7 +3499,7 @@@ fn test_force_close_fail_back() 
        // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
  
        nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
 -      check_closed_broadcast!(nodes[2], false);
 +      check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
        let tx = {
                let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
        mine_transaction(&nodes[1], &tx);
  
        // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  
        // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
@@@ -4357,7 -4313,7 +4350,7 @@@ fn test_manager_serialize_deserialize_e
        let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
        let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
  
-       // Start creating a channel, but stop right before broadcasting the event message FundingBroadcastSafe
+       // Start creating a channel, but stop right before broadcasting the funding transaction
        let channel_value = 100000;
        let push_msat = 10001;
        let a_flags = InitFeatures::known();
  
        let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&node_a, channel_value, 42);
  
-       node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
+       node_a.node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
        check_added_monitors!(node_a, 0);
  
        node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id()));
                assert_eq!(added_monitors[0].0, funding_output);
                added_monitors.clear();
        }
-       // Normally, this is where node_a would check for a FundingBroadcastSafe event, but the test de/serializes first instead
+       // Normally, this is where node_a would broadcast the funding transaction, but the test de/serializes first instead
  
        nodes.push(node_a);
        nodes.push(node_b);
        assert!(nodes[0].chain_monitor.watch_channel(chan_0_monitor.get_funding_txo().0, chan_0_monitor).is_ok());
        nodes[0].node = &nodes_0_deserialized;
  
-       // After deserializing, make sure the FundingBroadcastSafe event is still held by the channel manager
+       // After deserializing, make sure the funding_transaction is still held by the channel manager
        let events_4 = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events_4.len(), 1);
-       match events_4[0] {
-               Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
-                       assert_eq!(user_channel_id, 42);
-                       assert_eq!(*funding_txo, funding_output);
-               },
-               _ => panic!("Unexpected event"),
-       };
+       assert_eq!(events_4.len(), 0);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
  
        // Make sure the channel is functioning as though the de/serialization never happened
        assert_eq!(nodes[0].node.list_channels().len(), 1);
@@@ -4695,7 -4646,7 +4683,7 @@@ fn test_claim_sizeable_push_msat() 
  
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
        nodes[1].node.force_close_channel(&chan.2).unwrap();
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn.len(), 1);
@@@ -4721,7 -4672,7 +4709,7 @@@ fn test_claim_on_remote_sizeable_push_m
  
        let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
        nodes[0].node.force_close_channel(&chan.2).unwrap();
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
  
        mine_transaction(&nodes[1], &node_txn[0]);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
  
@@@ -4757,7 -4708,7 +4745,7 @@@ fn test_claim_on_remote_revoked_sizeabl
  
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -4883,7 -4834,7 +4871,7 @@@ fn test_static_spendable_outputs_justic
        claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage, 3_000_000);
  
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -4919,7 -4870,7 +4907,7 @@@ fn test_static_spendable_outputs_justic
  
        // A will generate HTLC-Timeout from revoked commitment tx
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  
        let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
        // B will generate justice tx from A's revoked commitment/HTLC tx
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  
        let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -4988,7 -4939,7 +4976,7 @@@ fn test_static_spendable_outputs_justic
  
        // B will generate HTLC-Success from revoked commitment tx
        mine_transaction(&nodes[1], &revoked_local_txn[0]);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
  
        // A will generate justice tx from B's revoked commitment/HTLC tx
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[0].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        connect_block(&nodes[0], &Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] });
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  
        let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -5078,7 -5029,7 +5066,7 @@@ fn test_onchain_to_onchain_claim() 
        assert!(updates.update_fail_malformed_htlcs.is_empty());
  
        mine_transaction(&nodes[2], &commitment_tx[0]);
 -      check_closed_broadcast!(nodes[2], false);
 +      check_closed_broadcast!(nodes[2], true);
        check_added_monitors!(nodes[2], 1);
  
        let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
        }
        check_added_monitors!(nodes[1], 1);
        let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 +      assert_eq!(msg_events.len(), 3);
        check_added_monitors!(nodes[1], 1);
        match msg_events[0] {
 -              MessageSendEvent::BroadcastChannelUpdate {  .. } => {},
 +              MessageSendEvent::BroadcastChannelUpdate { .. } => {},
                _ => panic!("Unexpected event"),
        }
        match msg_events[1] {
 +              MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { .. }, node_id: _ } => {},
 +              _ => panic!("Unexpected event"),
 +      }
 +      match msg_events[2] {
                MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
                        assert!(update_add_htlcs.is_empty());
                        assert!(update_fail_htlcs.is_empty());
        assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
        assert_eq!(b_txn[0].lock_time, 0); // Success tx
  
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  }
  
@@@ -5170,7 -5116,7 +5158,7 @@@ fn test_duplicate_payment_hash_one_fail
        check_spends!(commitment_txn[0], chan_2.3);
  
        mine_transaction(&nodes[1], &commitment_txn[0]);
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  
        let htlc_timeout_tx;
@@@ -5450,7 -5396,7 +5438,7 @@@ fn do_test_fail_backwards_unrevoked_rem
                mine_transaction(&nodes[2], &ds_prev_commitment_tx[0]);
        }
        connect_blocks(&nodes[2], ANTI_REORG_DELAY - 1);
 -      check_closed_broadcast!(nodes[2], false);
 +      check_closed_broadcast!(nodes[2], true);
        expect_pending_htlcs_forwardable!(nodes[2]);
        check_added_monitors!(nodes[2], 3);
  
@@@ -5587,7 -5533,7 +5575,7 @@@ fn test_dynamic_spendable_outputs_local
  
        // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
        mine_transaction(&nodes[0], &local_txn[0]);
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  
        let htlc_timeout = {
@@@ -5655,7 -5601,7 +5643,7 @@@ fn test_key_derivation_params() 
  
        // Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
        mine_transaction(&nodes[0], &local_txn_1[0]);
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  
        let htlc_timeout = {
@@@ -5747,7 -5693,7 +5735,7 @@@ fn do_htlc_claim_local_commitment_only(
                block.header.prev_blockhash = block.block_hash();
        }
        test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  }
  
@@@ -5779,7 -5725,7 +5767,7 @@@ fn do_htlc_claim_current_remote_commitm
                header.prev_blockhash = header.block_hash();
        }
        test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  }
  
@@@ -5827,7 -5773,7 +5815,7 @@@ fn do_htlc_claim_previous_remote_commit
        }
        if !check_revoke_no_close {
                test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
 -              check_closed_broadcast!(nodes[0], false);
 +              check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
        } else {
                expect_payment_failed!(nodes[0], our_payment_hash, true);
@@@ -7009,7 -6955,7 +6997,7 @@@ fn do_test_failure_delay_dust_htlc_loca
                mine_transaction(&nodes[0], &as_prev_commitment_tx[0]);
        }
  
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  
        assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
@@@ -7071,7 -7017,7 +7059,7 @@@ fn do_test_sweep_outbound_htlc_failure_
        if local {
                // We fail dust-HTLC 1 by broadcast of local commitment tx
                mine_transaction(&nodes[0], &as_commitment_tx[0]);
 -              check_closed_broadcast!(nodes[0], false);
 +              check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
        } else {
                // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, also fail the non-dust HTLC
                mine_transaction(&nodes[0], &bs_commitment_tx[0]);
 -              check_closed_broadcast!(nodes[0], false);
 +              check_closed_broadcast!(nodes[0], true);
                check_added_monitors!(nodes[0], 1);
                assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
                timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
@@@ -7761,7 -7707,7 +7749,7 @@@ fn test_bump_penalty_txn_on_revoked_htl
        let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
        // B will generate both revoked HTLC-timeout/HTLC-preimage txn from revoked commitment tx
        connect_block(&nodes[1], &Block { header, txdata: vec![revoked_local_txn[0].clone()] });
 -      check_closed_broadcast!(nodes[1], false);
 +      check_closed_broadcast!(nodes[1], true);
        check_added_monitors!(nodes[1], 1);
  
        let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                assert_eq!(node_txn.len(), 0);
                node_txn.clear();
        }
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  }
  
@@@ -8067,7 -8013,7 +8055,7 @@@ fn test_bump_txn_sanitize_tracking_maps
        assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
  
        mine_transaction(&nodes[0], &revoked_local_txn[0]);
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
        let penalty_txn = {
                let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -8389,9 -8335,9 +8377,9 @@@ fn test_pre_lockin_no_chan_closed_updat
        nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
  
        // Move the first channel through the funding flow...
-       let (temporary_channel_id, _tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
+       let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], 100000, 42);
  
-       nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);
  
        let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
@@@ -8430,7 -8376,7 +8418,7 @@@ fn test_htlc_no_detection() 
        // We deliberately connect the local tx twice, as this used to provoke a failure in this
        // test before the #653 fix.
        chain::Listen::block_connected(&nodes[0].chain_monitor.chain_monitor, &Block { header, txdata: vec![local_txn[0].clone()] }, nodes[0].best_block_info().1 + 1);
 -      check_closed_broadcast!(nodes[0], false);
 +      check_closed_broadcast!(nodes[0], true);
        check_added_monitors!(nodes[0], 1);
  
        let htlc_timeout = {
@@@ -8488,7 -8434,7 +8476,7 @@@ fn do_test_onchain_htlc_settlement_afte
        let mut force_closing_node = 0; // Alice force-closes
        if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
        nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
 -      check_closed_broadcast!(nodes[force_closing_node], false);
 +      check_closed_broadcast!(nodes[force_closing_node], true);
        check_added_monitors!(nodes[force_closing_node], 1);
        if go_onchain_before_fulfill {
                let txn_to_broadcast = match broadcast_alice {
                connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
                if broadcast_alice {
 -                      check_closed_broadcast!(nodes[1], false);
 +                      check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
                }
                assert_eq!(bob_txn.len(), 1);
                connect_block(&nodes[1], &Block { header, txdata: vec![txn_to_broadcast[0].clone()]});
                // If Bob was the one to force-close, he will have already passed these checks earlier.
                if broadcast_alice {
 -                      check_closed_broadcast!(nodes[1], false);
 +                      check_closed_broadcast!(nodes[1], true);
                        check_added_monitors!(nodes[1], 1);
                }
                let mut bob_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
@@@ -8673,7 -8619,7 +8661,7 @@@ fn test_duplicate_chan_id() 
        // Move the first channel through the funding flow...
        let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 42);
  
-       nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+       nodes[0].node.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
        check_added_monitors!(nodes[0], 0);
  
        let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
                let mut a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
                let mut as_chan = a_channel_lock.by_id.get_mut(&open_chan_2_msg.temporary_channel_id).unwrap();
                let logger = test_utils::TestLogger::new();
-               as_chan.get_outbound_funding_created(funding_outpoint, &&logger).unwrap()
+               as_chan.get_outbound_funding_created(tx.clone(), funding_outpoint, &&logger).unwrap()
        };
        check_added_monitors!(nodes[0], 0);
        nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
        }
  
        let events_4 = nodes[0].node.get_and_clear_pending_events();
-       assert_eq!(events_4.len(), 1);
-       match events_4[0] {
-               Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
-                       assert_eq!(user_channel_id, 42);
-                       assert_eq!(*funding_txo, funding_output);
-               },
-               _ => panic!("Unexpected event"),
-       };
+       assert_eq!(events_4.len(), 0);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].txid(), funding_output.txid);
  
        let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
        let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
@@@ -8800,6 -8741,7 +8783,7 @@@ fn test_error_chans_closed() 
        nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
        check_added_monitors!(nodes[0], 1);
        check_closed_broadcast!(nodes[0], false);
+       assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0).len(), 1);
        assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
        assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);