use util::logger::Logger;
use util::errors::APIError;
use util::config::{UserConfig,ChannelConfig};
+ use util::scid_utils::scid_from_parts;
use std;
-use std::default::Default;
use std::{cmp,mem,fmt};
use std::ops::Deref;
#[cfg(any(test, feature = "fuzztarget"))]
/// could miss the funding_tx_confirmed_in block as well, but it serves as a useful fallback.
funding_tx_confirmed_in: Option<BlockHash>,
short_channel_id: Option<u64>,
- /// Used to deduplicate block_connected callbacks, also used to verify consistency during
- /// ChannelManager deserialization (hence pub(super))
- pub(super) last_block_connected: BlockHash,
funding_tx_confirmations: u64,
counterparty_dust_limit_satoshis: u64,
funding_tx_confirmed_in: None,
short_channel_id: None,
- last_block_connected: Default::default(),
funding_tx_confirmations: 0,
feerate_per_kw: feerate,
funding_tx_confirmed_in: None,
short_channel_id: None,
- last_block_connected: Default::default(),
funding_tx_confirmations: 0,
feerate_per_kw: msg.feerate_per_kw,
&self.get_counterparty_pubkeys().funding_pubkey
}
- pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError> where L::Target: Logger {
+ pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, last_block_hash: BlockHash, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError> where L::Target: Logger {
if self.is_outbound() {
return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
}
&self.channel_transaction_parameters,
funding_redeemscript.clone(), self.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx);
+ holder_commitment_tx, last_block_hash);
channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
/// Handles a funding_signed message from the remote end.
/// If this call is successful, broadcast the funding transaction (and not before!)
- pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, logger: &L) -> Result<ChannelMonitor<Signer>, ChannelError> where L::Target: Logger {
+ pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, last_block_hash: BlockHash, logger: &L) -> Result<ChannelMonitor<Signer>, ChannelError> where L::Target: Logger {
if !self.is_outbound() {
return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
}
&self.channel_transaction_parameters,
funding_redeemscript.clone(), self.channel_value_satoshis,
obscure_factor,
- holder_commitment_tx);
+ holder_commitment_tx, last_block_hash);
channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
_ => true
}
});
- let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
- if header.block_hash() != self.last_block_connected {
- if self.funding_tx_confirmations > 0 {
- self.funding_tx_confirmations += 1;
- }
+
+ if self.funding_tx_confirmations > 0 {
+ self.funding_tx_confirmations += 1;
}
+
+ let non_shutdown_state = self.channel_state & (!MULTI_STATE_FLAGS);
if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
for &(index_in_block, tx) in txdata.iter() {
let funding_txo = self.get_funding_txo().unwrap();
}
}
}
- if height > 0xff_ff_ff || (index_in_block) > 0xff_ff_ff {
- panic!("Block was bogus - either height 16 million or had > 16 million transactions");
- }
- assert!(txo_idx <= 0xffff); // txo_idx is a (u16 as usize), so this is just listed here for completeness
self.funding_tx_confirmations = 1;
- self.short_channel_id = Some(((height as u64) << (5*8)) |
- ((index_in_block as u64) << (2*8)) |
- ((txo_idx as u64) << (0*8)));
+ self.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
+ Ok(scid) => Some(scid),
+ Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
+ }
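// Illustrative note: the short channel id follows the BOLT 7 layout
// `block_height (3 bytes) | tx_index (3 bytes) | output_index (2 bytes)`, so `scid_from_parts`
// is expected to compute roughly
//   (height << 40) | (index_in_block << 16) | txo_idx
// and to return Err(_) whenever a component overflows its field, which is what triggers the
// panic above.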
}
}
}
}
- if header.block_hash() != self.last_block_connected {
- self.last_block_connected = header.block_hash();
- self.update_time_counter = cmp::max(self.update_time_counter, header.time);
- if self.funding_tx_confirmations > 0 {
- if self.funding_tx_confirmations == self.minimum_depth as u64 {
- let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
- self.channel_state |= ChannelState::OurFundingLocked as u32;
- true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
- self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
- self.update_time_counter += 1;
- true
- } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
- // We got a reorg but not enough to trigger a force close, just update
- // funding_tx_confirmed_in and return.
- false
- } else if self.channel_state < ChannelState::ChannelFunded as u32 {
- panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
+
+ self.update_time_counter = cmp::max(self.update_time_counter, header.time);
+ if self.funding_tx_confirmations > 0 {
+ if self.funding_tx_confirmations == self.minimum_depth as u64 {
+ let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
+ self.channel_state |= ChannelState::OurFundingLocked as u32;
+ true
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirFundingLocked as u32) {
+ self.channel_state = ChannelState::ChannelFunded as u32 | (self.channel_state & MULTI_STATE_FLAGS);
+ self.update_time_counter += 1;
+ true
+ } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurFundingLocked as u32) {
+ // We got a reorg but not enough to trigger a force close, just update
+ // funding_tx_confirmed_in and return.
+ false
+ } else if self.channel_state < ChannelState::ChannelFunded as u32 {
+ panic!("Started confirming a channel in a state pre-FundingSent?: {}", self.channel_state);
+ } else {
+ // We got a reorg but not enough to trigger a force close, just update
+ // funding_tx_confirmed_in and return.
+ false
+ };
+ self.funding_tx_confirmed_in = Some(header.block_hash());
+
+ //TODO: Note that this must be a duplicate of the previous commitment point they sent us,
+ //as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
+ //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
+ //a protocol oversight, but I assume I'm just missing something.
+ if need_commitment_update {
+ if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
+ let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ return Ok((Some(msgs::FundingLocked {
+ channel_id: self.channel_id,
+ next_per_commitment_point,
+ }), timed_out_htlcs));
} else {
- // We got a reorg but not enough to trigger a force close, just update
- // funding_tx_confirmed_in and return.
- false
- };
- self.funding_tx_confirmed_in = Some(self.last_block_connected);
-
- //TODO: Note that this must be a duplicate of the previous commitment point they sent us,
- //as otherwise we will have a commitment transaction that they can't revoke (well, kinda,
- //they can by sending two revoke_and_acks back-to-back, but not really). This appears to be
- //a protocol oversight, but I assume I'm just missing something.
- if need_commitment_update {
- if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
- let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- return Ok((Some(msgs::FundingLocked {
- channel_id: self.channel_id,
- next_per_commitment_point,
- }), timed_out_htlcs));
- } else {
- self.monitor_pending_funding_locked = true;
- return Ok((None, timed_out_htlcs));
- }
+ self.monitor_pending_funding_locked = true;
+ return Ok((None, timed_out_htlcs));
}
}
}
return true;
}
}
- self.last_block_connected = header.block_hash();
- if Some(self.last_block_connected) == self.funding_tx_confirmed_in {
+ if Some(header.block_hash()) == self.funding_tx_confirmed_in {
self.funding_tx_confirmations = self.minimum_depth as u64 - 1;
}
false
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>) {
+ pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) {
+ // Note that we MUST only generate a monitor update that indicates force-closure - we're
+ // called during initialization prior to the chain_monitor in the encompassing ChannelManager
+ // being fully configured in some cases. Thus, it's likely any monitor events we generate will
+ // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
_ => {}
}
}
- let funding_txo = if let Some(funding_txo) = self.get_funding_txo() {
+ let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
// returning a channel monitor update here would imply a channel monitor update before
// we even registered the channel monitor to begin with, which is invalid.
// monitor update to the user, even if we return one).
// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelFunded as u32 | ChannelState::ShutdownComplete as u32) != 0 {
- Some(funding_txo.clone())
+ self.latest_monitor_update_id += 1;
+ Some((funding_txo, ChannelMonitorUpdate {
+ update_id: self.latest_monitor_update_id,
+ updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
+ }))
} else { None }
} else { None };
self.channel_state = ChannelState::ShutdownComplete as u32;
self.update_time_counter += 1;
- self.latest_monitor_update_id += 1;
- (funding_txo, ChannelMonitorUpdate {
- update_id: self.latest_monitor_update_id,
- updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
- }, dropped_outbound_htlcs)
+ (monitor_update, dropped_outbound_htlcs)
}
}
self.funding_tx_confirmed_in.write(writer)?;
self.short_channel_id.write(writer)?;
-
- self.last_block_connected.write(writer)?;
self.funding_tx_confirmations.write(writer)?;
self.counterparty_dust_limit_satoshis.write(writer)?;
let funding_tx_confirmed_in = Readable::read(reader)?;
let short_channel_id = Readable::read(reader)?;
-
- let last_block_connected = Readable::read(reader)?;
let funding_tx_confirmations = Readable::read(reader)?;
let counterparty_dust_limit_satoshis = Readable::read(reader)?;
funding_tx_confirmed_in,
short_channel_id,
- last_block_connected,
funding_tx_confirmations,
counterparty_dust_limit_satoshis,
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
+ let chain_hash = genesis_block(network).header.block_hash();
+ let last_block_hash = chain_hash;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Go through the flow of opening a channel between two nodes.
let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
- let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+ let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let funding_created_msg = node_a_chan.get_outbound_funding_created(funding_outpoint, &&logger).unwrap();
- let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, &&logger).unwrap();
+ let (funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg, last_block_hash, &&logger).unwrap();
// Node B --> Node A: funding signed
- let _ = node_a_chan.funding_signed(&funding_signed_msg, &&logger);
+ let _ = node_a_chan.funding_signed(&funding_signed_msg, last_block_hash, &&logger);
// Now disconnect the two nodes and check that the commitment point in
// Node B's channel_reestablish message is sane.
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentSecret(pub [u8;32]);
-type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
pub(super) pending_msg_events: Vec<MessageSendEvent>,
}
+/// Events which we process internally but cannot be processed immediately at the generation site
+/// for some reason. They are handled in timer_chan_freshness_every_min, so may be processed with
+/// quite some time lag.
+enum BackgroundEvent {
+ /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
+ /// commitment transaction.
+ ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+}
+
/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
/// the latest Init features we heard from the peer.
struct PeerState {
/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
///
-/// Note that the deserializer is only implemented for (Option<BlockHash>, ChannelManager), which
+/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
/// block_connected() to step towards your best block) upon deserialization before using the
pub(super) latest_block_height: AtomicUsize,
#[cfg(not(test))]
latest_block_height: AtomicUsize,
- last_block_hash: Mutex<BlockHash>,
+ last_block_hash: RwLock<BlockHash>,
secp_ctx: Secp256k1<secp256k1::All>,
#[cfg(any(test, feature = "_test_utils"))]
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
pending_events: Mutex<Vec<events::Event>>,
+ pending_background_events: Mutex<Vec<BackgroundEvent>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
/// Taken first everywhere where we are making changes before any other locks.
logger: L,
}
+/// Chain-related parameters used to construct a new `ChannelManager`.
+///
+/// Typically, the block-specific parameters are derived from the best block hash for the network,
+/// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
+/// are not needed when deserializing a previously constructed `ChannelManager`.
+pub struct ChainParameters {
+ /// The network for determining the `chain_hash` in Lightning messages.
+ pub network: Network,
+
+ /// The hash of the latest block successfully connected.
+ pub latest_hash: BlockHash,
+
+ /// The height of the latest block successfully connected.
+ ///
+ /// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
+ pub latest_height: usize,
+}
+
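// Illustrative sketch: a brand-new ChannelManager starting from the testnet genesis block would
// be configured with something along these lines (a real node substitutes its current best
// block hash and height):
//
//   let chain_params = ChainParameters {
//       network: Network::Testnet,
//       latest_hash: genesis_block(Network::Testnet).header.block_hash(),
//       latest_height: 0,
//   };
//
// The resulting `chain_params` is then handed to `ChannelManager::new()` below.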
/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
-/// desirable to notify any listeners on `wait_timeout`/`wait` that new updates are available for
-/// persistence. Therefore, this struct is responsible for locking the total consistency lock and,
-/// upon going out of scope, sending the aforementioned notification (since the lock being released
-/// indicates that the updates are ready for persistence).
+/// desirable to notify any listeners on `await_persistable_update_timeout`/
+/// `await_persistable_update` that new updates are available for persistence. Therefore, this
+/// struct is responsible for locking the total consistency lock and, upon going out of scope,
+/// sending the aforementioned notification (since the lock being released indicates that the
+/// updates are ready for persistence).
struct PersistenceNotifierGuard<'a> {
persistence_notifier: &'a PersistenceNotifier,
// We hold onto this result so the lock doesn't get released immediately.
}
}
-/// The amount of time we require our counterparty wait to claim their money (ie time between when
-/// we, or our watchtower, must check for them having broadcast a theft transaction).
+/// The amount of time in blocks we require our counterparty to wait to claim their money (ie time
+/// between when we, or our watchtower, must check for them having broadcast a theft transaction).
pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
-/// The amount of time we're willing to wait to claim money back to us
-pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
+/// the maximum required amount in lnd as of March 2021.
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
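// Illustrative arithmetic: at the nominal ten-minute block interval, BREAKDOWN_TIMEOUT
// = 6 * 24 = 144 blocks ≈ one day, while MAX_LOCAL_BREAKDOWN_TIMEOUT = 2 * 6 * 24 * 7
// = 2016 blocks ≈ two weeks.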
/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
///
/// panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
///
- /// Users must provide the current blockchain height from which to track onchain channel
- /// funding outpoints and send payments with reliable timelocks.
- ///
/// Users need to notify the new ChannelManager when a new block is connected or
- /// disconnected using its `block_connected` and `block_disconnected` methods.
- pub fn new(network: Network, fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self {
+ /// disconnected using its `block_connected` and `block_disconnected` methods, starting
+ /// from after `params.latest_hash`.
+ pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self {
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
ChannelManager {
default_configuration: config.clone(),
- genesis_hash: genesis_block(network).header.block_hash(),
+ genesis_hash: genesis_block(params.network).header.block_hash(),
fee_estimator: fee_est,
chain_monitor,
tx_broadcaster,
- latest_block_height: AtomicUsize::new(current_blockchain_height),
- last_block_hash: Mutex::new(Default::default()),
+ latest_block_height: AtomicUsize::new(params.latest_height),
+ last_block_hash: RwLock::new(params.latest_hash),
secp_ctx,
channel_state: Mutex::new(ChannelHolder{
per_peer_state: RwLock::new(HashMap::new()),
pending_events: Mutex::new(Vec::new()),
+ pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
persistence_notifier: PersistenceNotifier::new(),
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
- let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+ let (monitor_update_option, mut failed_htlcs) = shutdown_res;
log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- if let Some(funding_txo) = funding_txo_option {
+ if let Some((funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
events.append(&mut new_events);
}
+ /// Free the background events, generally called from timer_chan_freshness_every_min.
+ ///
+ /// Exposed for testing to allow us to process events quickly without generating accidental
+ /// BroadcastChannelUpdate events in timer_chan_freshness_every_min.
+ ///
+ /// Expects the caller to have a total_consistency_lock read lock.
+ fn process_background_events(&self) {
+ let mut background_events = Vec::new();
+ mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
+ for event in background_events.drain(..) {
+ match event {
+ BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+ // The channel has already been closed, so no use bothering to care about the
+ // monitor update completing.
+ let _ = self.chain_monitor.update_channel(funding_txo, update);
+ },
+ }
+ }
+ }
+
+ #[cfg(any(test, feature = "_test_utils"))]
+ pub(crate) fn test_process_background_events(&self) {
+ self.process_background_events();
+ }
+
/// If a peer is disconnected we mark any channels with that peer as 'disabled'.
/// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
/// to inform the network about the uselessness of these channels.
///
/// This method handles all the details, and must be called roughly once per minute.
+ ///
+ /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
pub fn timer_chan_freshness_every_min(&self) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ self.process_background_events();
+
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
for (_, chan) in channel_state.by_id.iter_mut() {
//identify whether we sent it or not based on the (I presume) very different runtime
//between the branches here. We should make this async and move it into the forward HTLCs
//timer handling.
+
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // from block_connected which may run during initialization prior to the chain_monitor
+ // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
HTLCSource::OutboundRoute { ref path, .. } => {
log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
let ((funding_msg, monitor), mut chan) = {
+ let last_block_hash = *self.last_block_hash.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- (try_chan_entry!(self, chan.get_mut().funding_created(msg, &self.logger), channel_state, chan), chan.remove())
+ (try_chan_entry!(self, chan.get_mut().funding_created(msg, last_block_hash, &self.logger), channel_state, chan), chan.remove())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
}
// We do not do a force-close here as that would generate a monitor update for
// a monitor that we didn't manage to store (and that we don't care about - we
// don't respond with the funding_signed so the channel can never go on chain).
- let (_funding_txo_option, _monitor_update, failed_htlcs) = chan.force_shutdown(true);
+ let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
assert!(failed_htlcs.is_empty());
return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
},
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
let (funding_txo, user_id) = {
+ let last_block_hash = *self.last_block_hash.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.channel_id) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let monitor = match chan.get_mut().funding_signed(&msg, &self.logger) {
+ let monitor = match chan.get_mut().funding_signed(&msg, last_block_hash, &self.logger) {
Ok(update) => update,
Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
};
self.finish_force_close_channel(failure);
}
}
+
+ /// Handle a list of channel failures during a block_connected or block_disconnected call,
+ /// pushing the channel monitor update (if any) to the background events queue and removing the
+ /// Channel object.
+ fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
+ for mut failure in failed_channels.drain(..) {
+ // Either a commitment transactions has been confirmed on-chain or
+ // Channel::block_disconnected detected that the funding transaction has been
+ // reorganized out of the main chain.
+ // We cannot broadcast our latest local state via monitor update (as
+ // Channel::force_shutdown tries to make us do) as we may still be in initialization,
+ // so we track the update internally and handle it when the user next calls
+ // timer_chan_freshness_every_min, guaranteeing we're running normally.
+ if let Some((funding_txo, update)) = failure.0.take() {
+ assert_eq!(update.updates.len(), 1);
+ if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
+ assert!(should_broadcast);
+ } else { unreachable!(); }
+ self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+ }
+ self.finish_force_close_channel(failure);
+ }
+ }
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
{
/// Updates channel state based on transactions seen in a connected block.
pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
- let header_hash = header.block_hash();
- log_trace!(self.logger, "Block {} at height {} connected", header_hash, height);
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // during initialization prior to the chain_monitor being fully configured in some cases.
+ // See the docs for `ChannelManagerReadArgs` for more.
+ let block_hash = header.block_hash();
+ log_trace!(self.logger, "Block {} at height {} connected", block_hash, height);
+
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+ self.latest_block_height.store(height as usize, Ordering::Release);
+ *self.last_block_hash.write().unwrap() = block_hash;
+
let mut failed_channels = Vec::new();
let mut timed_out_htlcs = Vec::new();
{
if let Some(short_id) = channel.get_short_channel_id() {
short_to_id.remove(&short_id);
}
- // It looks like our counterparty went on-chain. We go ahead and
- // broadcast our latest local state as well here, just in case its
- // some kind of SPV attack, though we expect these to be dropped.
+ // It looks like our counterparty went on-chain. Close the channel.
failed_channels.push(channel.force_shutdown(true));
if let Ok(update) = self.get_channel_update(&channel) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
!htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
});
}
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
- }
+
+ self.handle_init_event_channel_failures(failed_channels);
for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
}
- self.latest_block_height.store(height as usize, Ordering::Release);
- *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
+
loop {
// Update last_node_announcement_serial to be the max of its current value and the
// block timestamp. This should keep us close to the current time without relying on
/// If necessary, the channel may be force-closed without letting the counterparty participate
/// in the shutdown.
pub fn block_disconnected(&self, header: &BlockHeader) {
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // during initialization prior to the chain_monitor being fully configured in some cases.
+ // See the docs for `ChannelManagerReadArgs` for more.
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+ self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
+ *self.last_block_hash.write().unwrap() = header.prev_blockhash;
+
let mut failed_channels = Vec::new();
{
let mut channel_lock = self.channel_state.lock().unwrap();
}
});
}
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
- }
- self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
- *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.block_hash();
+
+ self.handle_init_event_channel_failures(failed_channels);
}
/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
- /// indicating whether persistence is necessary. Only one listener on `wait_timeout` is
- /// guaranteed to be woken up.
+ /// indicating whether persistence is necessary. Only one listener on
+ /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+ /// up.
/// Note that the feature `allow_wallclock_use` must be enabled to use this function.
#[cfg(any(test, feature = "allow_wallclock_use"))]
- pub fn wait_timeout(&self, max_wait: Duration) -> bool {
+ pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
self.persistence_notifier.wait_timeout(max_wait)
}
- /// Blocks until ChannelManager needs to be persisted. Only one listener on `wait` is
- /// guaranteed to be woken up.
- pub fn wait(&self) {
+ /// Blocks until ChannelManager needs to be persisted. Only one listener on
+ /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+ /// up.
+ pub fn await_persistable_update(&self) {
self.persistence_notifier.wait()
}
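// Illustrative sketch of a persister loop built on the renamed waiters (`manager` and
// `persist_manager` are hypothetical: a long-lived ChannelManager reference and a helper that
// serializes it to disk):
//
//   loop {
//       manager.await_persistable_update();
//       persist_manager(&manager);
//   }
//
// Since only one waiter is guaranteed to be woken per update, a single persister task should
// own this loop.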
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+ &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
}
});
}
}
/// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
-/// disk/backups, through `wait_timeout` and `wait`.
+/// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`.
struct PersistenceNotifier {
/// Users won't access the persistence_lock directly, but rather wait on its bool using
/// `wait_timeout` and `wait`.
self.genesis_hash.write(writer)?;
(self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
- self.last_block_hash.lock().unwrap().write(writer)?;
+ self.last_block_hash.read().unwrap().write(writer)?;
let channel_state = self.channel_state.lock().unwrap();
let mut unfunded_channels = 0;
event.write(writer)?;
}
+ let background_events = self.pending_background_events.lock().unwrap();
+ (background_events.len() as u64).write(writer)?;
+ for event in background_events.iter() {
+ match event {
+ BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
+ 0u8.write(writer)?;
+ funding_txo.write(writer)?;
+ monitor_update.write(writer)?;
+ },
+ }
+ }
+
(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
Ok(())
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
/// is:
/// 1) Deserialize all stored ChannelMonitors.
-/// 2) Deserialize the ChannelManager by filling in this struct and calling <(Option<BlockHash>,
-/// ChannelManager)>::read(reader, args).
+/// 2) Deserialize the ChannelManager by filling in this struct and calling:
+/// <(BlockHash, ChannelManager)>::read(reader, args)
/// This may result in closing some Channels if the ChannelMonitor is newer than the stored
/// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
-/// ChannelMonitor::get_outputs_to_watch() and ChannelMonitor::get_funding_txo().
+/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
+/// way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
+/// ChannelMonitor::get_funding_txo().
/// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Move the ChannelMonitors into your local chain::Watch.
-/// 6) Disconnect/connect blocks on the ChannelManager.
+/// 5) Disconnect/connect blocks on the ChannelManager.
+/// 6) Move the ChannelMonitors into your local chain::Watch.
+///
+/// Note that the ordering of #4-6 is not of importance; however, all three must occur before you
+/// call any other methods on the newly-deserialized ChannelManager.
+///
+/// Note that because some channels may be closed during deserialization, it is critical that you
+/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
+/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
+/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
+/// not force-close the same channels but consider them live), you may end up revoking a state for
+/// which you've already broadcasted the transaction.
pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (Option<BlockHash>, Arc<ChannelManager<Signer, M, T, K, F, L>>)
+ ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<Signer, M, T, K, F, L>>)
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface<Signer = Signer>,
L::Target: Logger,
{
fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
- let (blockhash, chan_manager) = <(Option<BlockHash>, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
+ let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
}
impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (Option<BlockHash>, ChannelManager<Signer, M, T, K, F, L>)
+ ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<Signer, M, T, K, F, L>)
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
K::Target: KeysInterface<Signer = Signer>,
let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
for _ in 0..channel_count {
let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
- if channel.last_block_connected != Default::default() && channel.last_block_connected != last_block_hash {
- return Err(DecodeError::InvalidValue);
- }
-
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
// But if the channel is behind of the monitor, close the channel:
- let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+ let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
failed_htlcs.append(&mut new_failed_htlcs);
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
} else {
}
}
+ let background_event_count: u64 = Readable::read(reader)?;
+ let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
+ for _ in 0..background_event_count {
+ match <u8 as Readable>::read(reader)? {
+ 0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+ _ => return Err(DecodeError::InvalidValue),
+ }
+ }
+
let last_node_announcement_serial: u32 = Readable::read(reader)?;
let mut secp_ctx = Secp256k1::new();
tx_broadcaster: args.tx_broadcaster,
latest_block_height: AtomicUsize::new(latest_block_height as usize),
- last_block_hash: Mutex::new(last_block_hash),
+ last_block_hash: RwLock::new(last_block_hash),
secp_ctx,
channel_state: Mutex::new(ChannelHolder {
per_peer_state: RwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
+ pending_background_events: Mutex::new(pending_background_events_read),
total_consistency_lock: RwLock::new(()),
persistence_notifier: PersistenceNotifier::new(),
//TODO: Broadcast channel update for closed channels, but only after we've made a
//connection or two.
- let last_seen_block_hash = if last_block_hash == Default::default() {
- None
- } else {
- Some(last_block_hash)
- };
- Ok((last_seen_block_hash, channel_manager))
+ Ok((last_block_hash.clone(), channel_manager))
}
}