use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
+// Since this struct is returned in `list_channels` methods, expose it here in case users want to
+// construct one themselves.
+pub use ln::channel::CounterpartyForwardingInfo;
use ln::channel::{Channel, ChannelError};
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
#[cfg(test)]
pub(super) latest_block_height: AtomicUsize,
#[cfg(not(test))]
latest_block_height: AtomicUsize,
- last_block_hash: Mutex<BlockHash>,
+ last_block_hash: RwLock<BlockHash>,
secp_ctx: Secp256k1<secp256k1::All>,
#[cfg(any(test, feature = "_test_utils"))]
}
/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
-/// desirable to notify any listeners on `wait_timeout`/`wait` that new updates are available for
-/// persistence. Therefore, this struct is responsible for locking the total consistency lock and,
-/// upon going out of scope, sending the aforementioned notification (since the lock being released
-/// indicates that the updates are ready for persistence).
+/// desirable to notify any listeners on `await_persistable_update_timeout`/
+/// `await_persistable_update` that new updates are available for persistence. Therefore, this
+/// struct is responsible for locking the total consistency lock and, upon going out of scope,
+/// sending the aforementioned notification (since the lock being released indicates that the
+/// updates are ready for persistence).
struct PersistenceNotifierGuard<'a> {
persistence_notifier: &'a PersistenceNotifier,
// We hold onto this result so the lock doesn't get released immediately.
}
}
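As a rough illustration of the guard pattern the doc comment above describes (not the actual implementation; all names below are illustrative), the guard takes the consistency lock in read mode on construction and fires the persistence notification from its `Drop` impl:

```rust
use std::sync::{Condvar, Mutex, RwLock, RwLockReadGuard};

struct Notifier { needs_persist: Mutex<bool>, cv: Condvar }

struct Guard<'a> {
    notifier: &'a Notifier,
    // Holding the read guard keeps the (illustrative) consistency lock held for the guard's lifetime.
    _read_guard: RwLockReadGuard<'a, ()>,
}

impl<'a> Guard<'a> {
    fn new(lock: &'a RwLock<()>, notifier: &'a Notifier) -> Self {
        Guard { notifier, _read_guard: lock.read().unwrap() }
    }
}

impl<'a> Drop for Guard<'a> {
    fn drop(&mut self) {
        // The lock is about to be released, so updates are ready to persist:
        // flag it and wake any waiting persister.
        *self.notifier.needs_persist.lock().unwrap() = true;
        self.notifier.cv.notify_all();
    }
}
```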
-/// The amount of time we require our counterparty wait to claim their money (ie time between when
-/// we, or our watchtower, must check for them having broadcast a theft transaction).
-pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
-/// The amount of time we're willing to wait to claim money back to us
-pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+/// The amount of time in blocks we require our counterparty to wait before they can claim their
+/// money (i.e. the window during which we, or our watchtower, must check for them having
+/// broadcast a theft transaction).
+///
+/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`]
+///
+/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
+pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
+/// the maximum required amount in lnd as of March 2021.
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
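For scale, `BREAKDOWN_TIMEOUT` is 144 blocks (roughly one day at six blocks per hour) and `MAX_LOCAL_BREAKDOWN_TIMEOUT` is 2016 blocks (roughly two weeks). A minimal sketch of raising the delay we demand from the counterparty, assuming the `ChannelHandshakeConfig` type referenced in the doc comment above:

```rust
use lightning::util::config::ChannelHandshakeConfig;

fn handshake_config_with_longer_delay() -> ChannelHandshakeConfig {
    // Ask the counterparty to wait ~3 days (432 blocks) after a unilateral close before
    // they can sweep their funds. Per the doc above, this can only raise the delay beyond
    // BREAKDOWN_TIMEOUT, never lower it.
    ChannelHandshakeConfig {
        our_to_self_delay: 6 * 24 * 3,
        ..Default::default()
    }
}
```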
/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
-/// ie the node we forwarded the payment on to should always have enough room to reliably time out
-/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
-/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
+/// HTLC's CLTV. The current default represents roughly six hours of blocks at six blocks/hour.
+///
+/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
+///
+/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
+// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
+// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
+// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
+// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 6;
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
// Check that our MIN_CLTV_EXPIRY_DELTA is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY +
// 2*LATENCY_GRACE_PERIOD_BLOCKS, i.e. that the sanity-check constant below cannot underflow.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
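A similar sketch for the forwarding delta (`MIN_CLTV_EXPIRY_DELTA` is 36 blocks, roughly six hours), assuming the `ChannelConfig` type referenced in the doc comment above:

```rust
use lightning::util::config::ChannelConfig;

fn channel_config_with_wider_delta() -> ChannelConfig {
    // Require ~12 hours (72 blocks) of headroom between an inbound HTLC's CLTV and the
    // CLTV of the HTLC we forward. Per the doc above, this can only increase the effective
    // delta beyond MIN_CLTV_EXPIRY_DELTA, never reduce it.
    ChannelConfig {
        cltv_expiry_delta: 6 * 12,
        ..Default::default()
    }
}
```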
/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
#[derive(Clone)]
/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
/// the peer is connected, and (c) no monitor update failure is pending resolution.
pub is_live: bool,
+
+ /// Information on the fees and requirements that the counterparty requires when forwarding
+ /// payments to us through this channel.
+ pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
}
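Since `CounterpartyForwardingInfo` is re-exported above, callers can read the new field straight out of `list_channels`. A minimal sketch; `channel_manager` and the printed field names (`fee_base_msat`, `fee_proportional_millionths`, `cltv_expiry_delta`) are assumptions about the struct's contents, not quoted from this diff:

```rust
// `channel_manager` is assumed to be an already-initialized `ChannelManager`.
for chan in channel_manager.list_channels() {
    if let Some(info) = chan.counterparty_forwarding_info {
        println!("counterparty forwards to us with base fee {} msat, {} ppm, cltv delta {}",
            info.fee_base_msat, info.fee_proportional_millionths, info.cltv_expiry_delta);
    }
}
```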
/// If a payment fails to send, it can be in one of several states. This enum is returned as the
tx_broadcaster,
latest_block_height: AtomicUsize::new(params.latest_height),
- last_block_hash: Mutex::new(params.latest_hash),
+ last_block_hash: RwLock::new(params.latest_hash),
secp_ctx,
channel_state: Mutex::new(ChannelHolder{
outbound_capacity_msat,
user_id: channel.get_user_id(),
is_live: channel.is_live(),
+ counterparty_forwarding_info: channel.counterparty_forwarding_info(),
});
}
}
}
}
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
if let Some(node_id) = peer_node_id {
if chan.get().get_counterparty_node_id() != *node_id {
- // Error or Ok here doesn't matter - the result is only exposed publicly
- // when peer_node_id is None anyway.
- return Ok(());
+ return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
}
if let Some(short_id) = chan.get().get_short_channel_id() {
});
}
- Ok(())
+ Ok(chan.get_counterparty_node_id())
}
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- self.force_close_channel_with_peer(channel_id, None)
+ match self.force_close_channel_with_peer(channel_id, None) {
+ Ok(counterparty_node_id) => {
+ self.channel_state.lock().unwrap().pending_msg_events.push(
+ events::MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+ },
+ }
+ );
+ Ok(())
+ },
+ Err(e) => Err(e)
+ }
}
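With the change above, force-closing now also queues an error message for the peer; the caller-facing API is unchanged. A minimal usage sketch, where `channel_id` is an assumed `[u8; 32]` of an open channel:

```rust
// Force-close by channel id. On success the latest local commitment transaction is
// broadcast and the counterparty is sent a "Channel force-closed" error message.
if let Err(e) = channel_manager.force_close_channel(&channel_id) {
    eprintln!("force close failed (unknown channel id?): {:?}", e);
}
```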
/// Force close all channels, immediately broadcasting the latest local commitment transaction
if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
}
- if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+ if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
}
let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
short_channel_id,
timestamp: chan.get_update_time_counter(),
flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
- cltv_expiry_delta: CLTV_EXPIRY_DELTA,
+ cltv_expiry_delta: chan.get_cltv_expiry_delta(),
htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
let ((funding_msg, monitor), mut chan) = {
+ let last_block_hash = *self.last_block_hash.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- (try_chan_entry!(self, chan.get_mut().funding_created(msg, &self.logger), channel_state, chan), chan.remove())
+ (try_chan_entry!(self, chan.get_mut().funding_created(msg, last_block_hash, &self.logger), channel_state, chan), chan.remove())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
}
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
let (funding_txo, user_id) = {
+ let last_block_hash = *self.last_block_hash.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.channel_id) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let monitor = match chan.get_mut().funding_signed(&msg, &self.logger) {
+ let monitor = match chan.get_mut().funding_signed(&msg, last_block_hash, &self.logger) {
Ok(update) => update,
Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
};
Ok(())
}
+ fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
+ Some(chan_id) => chan_id.clone(),
+ None => {
+ // It's not a local channel
+ return Ok(())
+ }
+ };
+ match channel_state.by_id.entry(chan_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+ // TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+ }
+ try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+ },
+ hash_map::Entry::Vacant(_) => unreachable!()
+ }
+ Ok(())
+ }
+
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
msg: update
});
}
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ },
+ });
}
},
}
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
- let header_hash = header.block_hash();
- log_trace!(self.logger, "Block {} at height {} connected", header_hash, height);
+ let block_hash = header.block_hash();
+ log_trace!(self.logger, "Block {} at height {} connected", block_hash, height);
+
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+ assert_eq!(*self.last_block_hash.read().unwrap(), header.prev_blockhash,
+ "Blocks must be connected in chain-order - the connected header must build on the last connected header");
+ assert_eq!(self.latest_block_height.load(Ordering::Acquire) as u64, height as u64 - 1,
+ "Blocks must be connected in chain-order - the connected block height must be one greater than the last connected height");
+ self.latest_block_height.store(height as usize, Ordering::Release);
+ *self.last_block_hash.write().unwrap() = block_hash;
+
let mut failed_channels = Vec::new();
let mut timed_out_htlcs = Vec::new();
{
msg: update
});
}
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: channel.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: channel.channel_id(), data: "Commitment or closing transaction was confirmed on chain.".to_owned() }
+ },
+ });
return false;
}
}
for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
}
- self.latest_block_height.store(height as usize, Ordering::Release);
- *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
+
loop {
// Update last_node_announcement_serial to be the max of its current value and the
// block timestamp. This should keep us close to the current time without relying on
// during initialization prior to the chain_monitor being fully configured in some cases.
// See the docs for `ChannelManagerReadArgs` for more.
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+ assert_eq!(*self.last_block_hash.read().unwrap(), header.block_hash(),
+ "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
+ let new_height = self.latest_block_height.fetch_sub(1, Ordering::AcqRel) as u32 - 1;
+ *self.last_block_hash.write().unwrap() = header.prev_blockhash;
+
let mut failed_channels = Vec::new();
{
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
- channel_state.by_id.retain(|_, v| {
- if v.block_disconnected(header) {
+ channel_state.by_id.retain(|channel_id, v| {
+ if v.block_disconnected(header, new_height) {
if let Some(short_id) = v.get_short_channel_id() {
short_to_id.remove(&short_id);
}
msg: update
});
}
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: v.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Funding transaction was un-confirmed.".to_owned() }
+ },
+ });
false
} else {
true
}
});
}
+
self.handle_init_event_channel_failures(failed_channels);
- self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
- *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.block_hash();
}
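The new assertions make the ordering contract explicit: each connected header must build on the previously connected one, and disconnection must peel blocks off from the tip. A rough sketch of the caller-side pattern during a reorg (the exact `block_connected`/`block_disconnected` signatures are paraphrased here, not quoted):

```rust
// The manager now asserts these invariants instead of silently accepting out-of-order blocks:
//   connect:    header.prev_blockhash == last_block_hash && height == latest_block_height + 1
//   disconnect: header.block_hash()   == last_block_hash
for header in stale_blocks_tip_first {
    channel_manager.block_disconnected(&header);
}
for (header, height, txdata) in new_blocks_in_chain_order {
    channel_manager.block_connected(&header, &txdata, height);
}
```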
/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
- /// indicating whether persistence is necessary. Only one listener on `wait_timeout` is
- /// guaranteed to be woken up.
+ /// indicating whether persistence is necessary. Only one listener on
+ /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+ /// up.
/// Note that the feature `allow_wallclock_use` must be enabled to use this function.
#[cfg(any(test, feature = "allow_wallclock_use"))]
- pub fn wait_timeout(&self, max_wait: Duration) -> bool {
+ pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
self.persistence_notifier.wait_timeout(max_wait)
}
- /// Blocks until ChannelManager needs to be persisted. Only one listener on `wait` is
- /// guaranteed to be woken up.
- pub fn wait(&self) {
+ /// Blocks until ChannelManager needs to be persisted. Only one listener on
+ /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+ /// up.
+ pub fn await_persistable_update(&self) {
self.persistence_notifier.wait()
}
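The renamed methods pair naturally with a dedicated persistence thread: block until the manager reports that something changed, then serialize it. A minimal sketch, assuming a `ChannelManager` in `channel_manager` and an illustrative on-disk path:

```rust
use lightning::util::ser::Writeable;

// Illustrative persister loop; error handling is kept minimal.
loop {
    // Returns once the total_consistency_lock has been released after an update.
    channel_manager.await_persistable_update();
    let bytes = channel_manager.encode();
    std::fs::write("channel_manager.bin", &bytes).expect("failed to persist manager");
}
```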
let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
}
+ fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
+ let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+ }
+
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+ &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
}
});
}
}
/// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
-/// disk/backups, through `wait_timeout` and `wait`.
+/// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`.
struct PersistenceNotifier {
/// Users won't access the persistence_lock directly, but rather wait on its bool using
/// `wait_timeout` and `wait`.
self.genesis_hash.write(writer)?;
(self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
- self.last_block_hash.lock().unwrap().write(writer)?;
+ self.last_block_hash.read().unwrap().write(writer)?;
let channel_state = self.channel_state.lock().unwrap();
let mut unfunded_channels = 0;
let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
for _ in 0..channel_count {
let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
- if channel.last_block_connected != Default::default() && channel.last_block_connected != last_block_hash {
- return Err(DecodeError::InvalidValue);
- }
-
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
tx_broadcaster: args.tx_broadcaster,
latest_block_height: AtomicUsize::new(latest_block_height as usize),
- last_block_hash: Mutex::new(last_block_hash),
+ last_block_hash: RwLock::new(last_block_hash),
secp_ctx,
channel_state: Mutex::new(ChannelHolder {