}
/// Events which we process internally but cannot be processed immediately at the generation site
-/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+/// for some reason. They are handled in timer_tick_occurred, so may be processed with
/// quite some time lag.
enum BackgroundEvent {
/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
-/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
+/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
///
/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
/// or a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
events.append(&mut new_events);
}
- /// Free the background events, generally called from timer_chan_freshness_every_min.
+ /// Free the background events, generally called from timer_tick_occurred.
///
/// Exposed for testing to allow us to process events quickly without generating accidental
- /// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+ /// BroadcastChannelUpdate events in timer_tick_occurred.
///
/// Expects the caller to have a total_consistency_lock read lock.
fn process_background_events(&self) {
/// This method handles all the details, and must be called roughly once per minute.
///
/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
- pub fn timer_chan_freshness_every_min(&self) {
+ pub fn timer_tick_occurred(&self) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
self.process_background_events();
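As a caller-side illustration of the once-per-minute requirement documented above, here is a minimal sketch (not part of this patch) of driving the renamed timer from a background thread; `channel_manager` is assumed to be an Arc-wrapped ChannelManager shared with the rest of the node, and the 60-second interval simply follows the "roughly once per minute" guidance.

use std::sync::Arc;
use std::time::Duration;

// Hypothetical driver loop: wake up about once per minute so that queued
// BackgroundEvents are freed and disabled-channel ChannelUpdates go out.
let cm = Arc::clone(&channel_manager);
std::thread::spawn(move || loop {
    std::thread::sleep(Duration::from_secs(60));
    cm.timer_tick_occurred();
});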
// We cannot broadcast our latest local state via monitor update (as
// Channel::force_shutdown tries to make us do) as we may still be in initialization,
// so we track the update internally and handle it when the user next calls
- // timer_chan_freshness_every_min, guaranteeing we're running normally.
+ // timer_tick_occurred, guaranteeing we're running normally.
if let Some((funding_txo, update)) = failure.0.take() {
assert_eq!(update.updates.len(), 1);
if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
use chain::chainmonitor::ChainMonitor;
use chain::channelmonitor::Persist;
use chain::keysinterface::{KeysManager, InMemorySigner};
- use chain::transaction::OutPoint;
use ln::channelmanager::{ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
use ln::features::InitFeatures;
use ln::functional_test_utils::*;
tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
value: 8_000_000, script_pubkey: output_script,
}]};
- let funding_outpoint = OutPoint { txid: tx.txid(), index: 0 };
- node_a.funding_transaction_generated(&temporary_channel_id, funding_outpoint);
+ node_a.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
} else { panic!(); }
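For context on the signature change exercised above, a rough application-side sketch (not from this patch): on Event::FundingGenerationReady the wallet builds the complete funding transaction paying `output_script` and hands the whole transaction back, rather than just its OutPoint. `build_funding_tx` is a hypothetical wallet helper, and the event field names are assumed to match this LDK version.

if let Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, output_script, .. } = event {
    // Hypothetical wallet call producing a signed transaction with one output of
    // `channel_value_satoshis` paying `output_script`.
    let funding_tx: Transaction = build_funding_tx(channel_value_satoshis, output_script);
    // The ChannelManager now takes the complete transaction and broadcasts it
    // itself once the counterparty's funding_signed has been handled.
    channel_manager.funding_transaction_generated(&temporary_channel_id, funding_tx).unwrap();
}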
node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
- get_event!(node_a_holder, Event::FundingBroadcastSafe);
+ assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
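The broadcast assertion above replaces the old FundingBroadcastSafe check because the ChannelManager now hands the funding transaction to the BroadcasterInterface on its own. As a sketch of where that broadcast lands (assuming this version's single-transaction broadcast_transaction method), the test's tx_broadcaster behaves essentially like a recorder such as:

use std::sync::Mutex;

struct RecordingBroadcaster { txn: Mutex<Vec<Transaction>> }
impl BroadcasterInterface for RecordingBroadcaster {
    fn broadcast_transaction(&self, tx: &Transaction) {
        // Record the transaction instead of relaying it to a real Bitcoin node.
        self.txn.lock().unwrap().push(tx.clone());
    }
}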
let block = Block {
header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },