//! imply it needs to fail HTLCs/payments/channels it manages).
//!
-use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::cmp::fixed_time_eq;
-use bitcoin::hash_types::BlockHash;
+use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::secp256k1::key::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1;
use chain;
+use chain::Confirm;
use chain::Watch;
use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use chain::transaction::{OutPoint, TransactionData};
+// Since this struct is returned in `list_channels` methods, expose it here in case users want to
+// construct one themselves.
+pub use ln::channel::CounterpartyForwardingInfo;
use ln::channel::{Channel, ChannelError};
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
use ln::msgs::NetAddress;
use ln::onion_utils;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
-use chain::keysinterface::{ChannelKeys, KeysInterface, KeysManager, InMemoryChannelKeys};
+use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
use util::config::UserConfig;
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::{byte_utils, events};
short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
},
Receive {
- payment_data: Option<msgs::FinalOnionHopData>,
+ payment_data: msgs::FinalOnionHopData,
incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
},
}
struct ClaimableHTLC {
prev_hop: HTLCPreviousHopData,
value: u64,
- /// Filled in when the HTLC was received with a payment_secret packet, which contains a
- /// total_msat (which may differ from value if this is a Multi-Path Payment) and a
+ /// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
/// payment_secret which prevents path-probing attacks and can associate different HTLCs which
/// are part of the same payment.
- payment_data: Option<msgs::FinalOnionHopData>,
+ payment_data: msgs::FinalOnionHopData,
cltv_expiry: u32,
}
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentSecret(pub [u8;32]);
-type ShutdownResult = (Option<OutPoint>, ChannelMonitorUpdate, Vec<(HTLCSource, PaymentHash)>);
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
}
// Note this is only exposed in cfg(test):
-pub(super) struct ChannelHolder<ChanSigner: ChannelKeys> {
- pub(super) by_id: HashMap<[u8; 32], Channel<ChanSigner>>,
+pub(super) struct ChannelHolder<Signer: Sign> {
+ pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
pub(super) short_to_id: HashMap<u64, [u8; 32]>,
/// short channel id -> forward infos. Key of 0 means payments received
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the existence of a channel with the short id here, nor the short
/// ids in the PendingHTLCInfo!
pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
- /// (payment_hash, payment_secret) -> Vec<HTLCs> for tracking HTLCs that
- /// were to us and can be failed/claimed by the user
+ /// Map from payment hash to any HTLCs which are to us and can be failed/claimed by the user.
/// Note that while this is held in the same mutex as the channels themselves, no consistency
/// guarantees are made about the channels given here actually existing anymore by the time you
/// go to read them!
- claimable_htlcs: HashMap<(PaymentHash, Option<PaymentSecret>), Vec<ClaimableHTLC>>,
+ claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
/// Messages to send to peers - pushed to in the same lock that they are generated in (except
/// for broadcast messages, where ordering isn't as strict).
pub(super) pending_msg_events: Vec<MessageSendEvent>,
}
+/// Events which we process internally but which cannot be processed immediately at the
+/// generation site for some reason. They are handled in timer_tick_occurred, so may be processed
+/// with quite some time lag.
+enum BackgroundEvent {
+ /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
+ /// commitment transaction.
+ ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
+}
+
/// State we hold per-peer. In the future we should put channels in here, but for now we only hold
/// the latest Init features we heard from the peer.
struct PeerState {
latest_features: InitFeatures,
}
-#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
-const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
+/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
+/// actually ours and not some duplicate HTLC sent to us by a node along the route.
+///
+/// For users who don't want to bother doing their own payment preimage storage, we also store that
+/// here.
+struct PendingInboundPayment {
+ /// The payment secret that the sender must use for us to accept this payment
+ payment_secret: PaymentSecret,
+ /// Time at which this HTLC expires - blocks with a header time above this value will result in
+ /// this payment being removed.
+ expiry_time: u64,
+ /// Arbitrary identifier the user specifies (or not)
+ user_payment_id: u64,
+ // Other required attributes of the payment, optionally enforced:
+ payment_preimage: Option<PaymentPreimage>,
+ min_value_msat: Option<u64>,
+}
/// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
/// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
/// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
/// issues such as overly long function definitions. Note that the ChannelManager can take any
/// type that implements KeysInterface for its keys manager, but this type alias chooses the
/// concrete type of the KeysManager.
-pub type SimpleArcChannelManager<M, T, F, L> = Arc<ChannelManager<InMemoryChannelKeys, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>>;
+pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
/// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
/// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
/// need a ChannelManager with a static lifetime. Using a reference instead of an Arc also
/// helps with issues such as long function definitions. Note that the ChannelManager can take any
/// type that implements KeysInterface for its keys manager, but this type alias chooses the
/// concrete type of the KeysManager.
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemoryChannelKeys, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemorySigner, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
///
-/// Note that the deserializer is only implemented for (Sha256dHash, ChannelManager), which
+/// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
/// block_connected() to step towards your best block) upon deserialization before using the
/// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
-/// timer_chan_freshness_every_min roughly once per minute, though it doesn't have to be perfect.
+/// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
///
/// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
/// or a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
/// essentially you should default to using a SimpleRefChannelManager, and use a
/// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
/// you're using lightning-net-tokio.
-pub struct ChannelManager<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- where M::Target: chain::Watch<Keys=ChanSigner>,
+pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
tx_broadcaster: T,
#[cfg(test)]
- pub(super) latest_block_height: AtomicUsize,
+ pub(super) best_block: RwLock<BestBlock>,
#[cfg(not(test))]
- latest_block_height: AtomicUsize,
- last_block_hash: Mutex<BlockHash>,
+ best_block: RwLock<BestBlock>,
secp_ctx: Secp256k1<secp256k1::All>,
#[cfg(any(test, feature = "_test_utils"))]
- pub(super) channel_state: Mutex<ChannelHolder<ChanSigner>>,
+ pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
#[cfg(not(any(test, feature = "_test_utils")))]
- channel_state: Mutex<ChannelHolder<ChanSigner>>,
+ channel_state: Mutex<ChannelHolder<Signer>>,
+
+ /// Storage for PaymentSecrets and any requirements on future inbound payments before we will
+ /// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
+ /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
+ /// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
+ /// Locked *after* channel_state.
+ pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
+
our_network_key: SecretKey,
+ our_network_pubkey: PublicKey,
/// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
/// value increases strictly since we don't assume access to a time source.
last_node_announcement_serial: AtomicUsize,
+ /// The highest block timestamp we've seen, which is usually a good guess at the current time.
+ /// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
+ /// very far in the past, and can only ever be up to two hours in the future.
+ highest_seen_timestamp: AtomicUsize,
+
/// The bulk of our storage will eventually be here (channels and message queues and the like).
/// If we are connected to a peer we always at least have an entry here, even if no channels
/// are currently open with that peer.
per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
pending_events: Mutex<Vec<events::Event>>,
+ pending_background_events: Mutex<Vec<BackgroundEvent>>,
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
/// Essentially just when we're serializing ourselves out.
/// Taken first everywhere where we are making changes before any other locks.
logger: L,
}
+/// Chain-related parameters used to construct a new `ChannelManager`.
+///
+/// Typically, the block-specific parameters are derived from the best block hash for the network,
+/// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
+/// are not needed when deserializing a previously constructed `ChannelManager`.
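+///
+/// A hedged construction sketch, assuming a fresh node that has not yet processed any blocks
+/// and so starts from the genesis of its network:
+///
+/// ```ignore
+/// let params = ChainParameters {
+///     network: Network::Testnet,
+///     best_block: BestBlock::from_genesis(Network::Testnet),
+/// };
+/// ```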
+pub struct ChainParameters {
+ /// The network for determining the `chain_hash` in Lightning messages.
+ pub network: Network,
+
+ /// The hash and height of the latest block successfully connected.
+ ///
+ /// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
+ pub best_block: BestBlock,
+}
+
+/// The best known block as identified by its hash and height.
+#[derive(Clone, Copy)]
+pub struct BestBlock {
+ block_hash: BlockHash,
+ height: u32,
+}
+
+impl BestBlock {
+ /// Returns the best block from the genesis of the given network.
+ pub fn from_genesis(network: Network) -> Self {
+ BestBlock {
+ block_hash: genesis_block(network).header.block_hash(),
+ height: 0,
+ }
+ }
+
+ /// Returns the best block as identified by the given block hash and height.
+ pub fn new(block_hash: BlockHash, height: u32) -> Self {
+ BestBlock { block_hash, height }
+ }
+
+ /// Returns the best block hash.
+ pub fn block_hash(&self) -> BlockHash { self.block_hash }
+
+ /// Returns the best block height.
+ pub fn height(&self) -> u32 { self.height }
+}
+
/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
-/// desirable to notify any listeners on `wait_timeout`/`wait` that new updates are available for
-/// persistence. Therefore, this struct is responsible for locking the total consistency lock and,
-/// upon going out of scope, sending the aforementioned notification (since the lock being released
-/// indicates that the updates are ready for persistence).
+/// desirable to notify any listeners on `await_persistable_update_timeout`/
+/// `await_persistable_update` that new updates are available for persistence. Therefore, this
+/// struct is responsible for locking the total consistency lock and, upon going out of scope,
+/// sending the aforementioned notification (since the lock being released indicates that the
+/// updates are ready for persistence).
struct PersistenceNotifierGuard<'a> {
persistence_notifier: &'a PersistenceNotifier,
// We hold onto this result so the lock doesn't get released immediately.
}
}
-/// The amount of time we require our counterparty wait to claim their money (ie time between when
-/// we, or our watchtower, must check for them having broadcast a theft transaction).
-pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
-/// The amount of time we're willing to wait to claim money back to us
-pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+/// The amount of time in blocks we require our counterparty to wait to claim their money (ie the
+/// time during which we, or our watchtower, must check for them having broadcast a theft
+/// transaction).
+///
+/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`]
+///
+/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
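+///
+/// For example, to require our counterparty to wait two days rather than one (a hedged sketch,
+/// assuming the field layout of [`UserConfig`] in `util::config`):
+///
+/// ```ignore
+/// let mut config = UserConfig::default();
+/// config.own_channel_config.our_to_self_delay = 2 * BREAKDOWN_TIMEOUT;
+/// ```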
+pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
+/// the maximum required amount in lnd as of March 2021.
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
-/// ie the node we forwarded the payment on to should always have enough room to reliably time out
-/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
-/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
+/// HTLC's CLTV. The current default represents roughly six hours of blocks at six blocks/hour.
+///
+/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
+///
+/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
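+///
+/// For example, to double the forwarding margin to roughly twelve hours of blocks (a hedged
+/// sketch, again assuming the field layout of [`UserConfig`] in `util::config`):
+///
+/// ```ignore
+/// let mut config = UserConfig::default();
+/// config.channel_options.cltv_expiry_delta = 2 * MIN_CLTV_EXPIRY_DELTA;
+/// ```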
+// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
+// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
+// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
+// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 6;
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
+/// Minimum CLTV difference between the current block height and received inbound payments.
+/// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least
+/// this value.
+pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER;
+
// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
// ie that if the next-hop peer fails the HTLC within
// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
// then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC and
// failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
// LATENCY_GRACE_PERIOD_BLOCKS.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
-const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
#[derive(Clone)]
/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
/// the peer is connected, and (c) no monitor update failure is pending resolution.
pub is_live: bool,
+
+ /// Information on the fees and requirements that the counterparty requires when forwarding
+ /// payments to us through this channel.
+ pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
}
/// If a payment fails to send, it can be in one of several states. This enum is returned as the
}
}
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<ChanSigner, M, T, K, F, L>
- where M::Target: chain::Watch<Keys=ChanSigner>,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
///
/// Panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
///
- /// Users must provide the current blockchain height from which to track onchain channel
- /// funding outpoints and send payments with reliable timelocks.
- ///
/// Users need to notify the new ChannelManager when a new block is connected or
- /// disconnected using its `block_connected` and `block_disconnected` methods.
- pub fn new(network: Network, fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, current_blockchain_height: usize) -> Self {
- let secp_ctx = Secp256k1::new();
+ /// disconnected using its `block_connected` and `block_disconnected` methods, starting
+ /// from after `params.best_block.block_hash()`.
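+ ///
+ /// A hedged construction sketch, assuming `fee_est`, `chain_monitor`, `tx_broadcaster`,
+ /// `logger`, `keys_manager` and `config` have already been built and (`best_hash`,
+ /// `best_height`) describe the current chain tip:
+ ///
+ /// ```ignore
+ /// let params = ChainParameters {
+ ///     network: Network::Bitcoin,
+ ///     best_block: BestBlock::new(best_hash, best_height),
+ /// };
+ /// let channel_manager = ChannelManager::new(
+ ///     fee_est, chain_monitor, tx_broadcaster, logger, keys_manager, config, params);
+ /// ```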
+ pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self {
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
ChannelManager {
default_configuration: config.clone(),
- genesis_hash: genesis_block(network).header.block_hash(),
+ genesis_hash: genesis_block(params.network).header.block_hash(),
fee_estimator: fee_est,
chain_monitor,
tx_broadcaster,
- latest_block_height: AtomicUsize::new(current_blockchain_height),
- last_block_hash: Mutex::new(Default::default()),
- secp_ctx,
+ best_block: RwLock::new(params.best_block),
channel_state: Mutex::new(ChannelHolder{
by_id: HashMap::new(),
claimable_htlcs: HashMap::new(),
pending_msg_events: Vec::new(),
}),
+ pending_inbound_payments: Mutex::new(HashMap::new()),
+
our_network_key: keys_manager.get_node_secret(),
+ our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
+ secp_ctx,
last_node_announcement_serial: AtomicUsize::new(0),
+ highest_seen_timestamp: AtomicUsize::new(0),
per_peer_state: RwLock::new(HashMap::new()),
pending_events: Mutex::new(Vec::new()),
+ pending_background_events: Mutex::new(Vec::new()),
total_consistency_lock: RwLock::new(()),
persistence_notifier: PersistenceNotifier::new(),
}
}
+ /// Gets the current configuration applied to all new channels, as set in the constructor.
+ pub fn get_current_default_configuration(&self) -> &UserConfig {
+ &self.default_configuration
+ }
+
/// Creates a new outbound channel to the given remote node and with the given value.
///
- /// user_id will be provided back as user_channel_id in FundingGenerationReady and
- /// FundingBroadcastSafe events to allow tracking of which events correspond with which
- /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
- /// may wish to avoid using 0 for user_id here.
+ /// user_id will be provided back as user_channel_id in FundingGenerationReady events to allow
+ /// tracking of which events correspond with which create_channel call. Note that the
+ /// user_channel_id defaults to 0 for inbound channels, so you may wish to avoid using 0 for
+ /// user_id here. user_id has no meaning inside of LDK; it is simply copied to events and
+ /// otherwise ignored.
///
/// If successful, will generate a SendOpenChannel message event, so you should probably poll
/// PeerManager::process_events afterwards.
Ok(())
}
- fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<ChanSigner>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
+ fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<Signer>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
let mut res = Vec::new();
{
let channel_state = self.channel_state.lock().unwrap();
outbound_capacity_msat,
user_id: channel.get_user_id(),
is_live: channel.is_live(),
+ counterparty_forwarding_info: channel.counterparty_forwarding_info(),
});
}
}
#[inline]
fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
- let (funding_txo_option, monitor_update, mut failed_htlcs) = shutdown_res;
+ let (monitor_update_option, mut failed_htlcs) = shutdown_res;
log_trace!(self.logger, "Finishing force-closure of channel {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
}
- if let Some(funding_txo) = funding_txo_option {
+ if let Some((funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
}
}
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<(), APIError> {
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: Option<&PublicKey>) -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
if let Some(node_id) = peer_node_id {
if chan.get().get_counterparty_node_id() != *node_id {
- // Error or Ok here doesn't matter - the result is only exposed publicly
- // when peer_node_id is None anyway.
- return Ok(());
+ return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
}
}
if let Some(short_id) = chan.get().get_short_channel_id() {
});
}
- Ok(())
+ Ok(chan.get_counterparty_node_id())
}
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
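+ ///
+ /// A hedged call sketch (assuming `channel_id` was obtained from `list_channels` or from an
+ /// event for one of our channels):
+ ///
+ /// ```ignore
+ /// channel_manager.force_close_channel(&channel_id).expect("channel_id should be known");
+ /// ```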
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- self.force_close_channel_with_peer(channel_id, None)
+ match self.force_close_channel_with_peer(channel_id, None) {
+ Ok(counterparty_node_id) => {
+ self.channel_state.lock().unwrap().pending_msg_events.push(
+ events::MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+ },
+ }
+ );
+ Ok(())
+ },
+ Err(e) => Err(e)
+ }
}
/// Force close all channels, immediately broadcasting the latest local commitment transaction
}
}
- fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<ChanSigner>>) {
+ fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<Signer>>) {
macro_rules! return_malformed_err {
($msg: expr, $err_code: expr) => {
{
// HTLC_FAIL_BACK_BUFFER blocks to go.
// Also, ensure that, in the case of an unknown payment hash, our payment logic has enough time to fail the HTLC backward
// before our onchain logic triggers a channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
- if (msg.cltv_expiry as u64) <= self.latest_block_height.load(Ordering::Acquire) as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
+ if (msg.cltv_expiry as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
}
// final_incorrect_htlc_amount
msgs::OnionHopDataFormat::FinalNode { payment_data } => payment_data,
};
+ if payment_data.is_none() {
+ return_err!("We require payment_secrets", 0x4000|0x2000|3, &[0;0]);
+ }
+
// Note that we could obviously respond immediately with an update_fulfill_htlc
// message, however that would leak that we are the recipient of this payment, so
// instead we stay symmetric with the forwarding case, only responding (after a
// delay) once they've sent us a commitment_signed!
PendingHTLCStatus::Forward(PendingHTLCInfo {
routing: PendingHTLCRouting::Receive {
- payment_data,
+ payment_data: payment_data.unwrap(),
incoming_cltv_expiry: msg.cltv_expiry,
},
payment_hash: msg.payment_hash.clone(),
if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
}
- if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+ if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + chan.get_cltv_expiry_delta() as u64 { // incorrect_cltv_expiry
break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
}
- let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let cur_height = self.best_block.read().unwrap().height() + 1;
// Theoretically, channel counterparty shouldn't send us an HTLC expiring now, but we want to be robust wrt
// counterparty packet sanitization (see HTLC_FAIL_BACK_BUFFER rationale)
if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
/// only fails if the channel does not yet have an assigned short_id
/// May be called with channel_state already locked!
- fn get_channel_update(&self, chan: &Channel<ChanSigner>) -> Result<msgs::ChannelUpdate, LightningError> {
+ fn get_channel_update(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
let short_channel_id = match chan.get_short_channel_id() {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
Some(id) => id,
short_channel_id,
timestamp: chan.get_update_time_counter(),
flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
- cltv_expiry_delta: CLTV_EXPIRY_DELTA,
+ cltv_expiry_delta: chan.get_cltv_expiry_delta(),
htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
fee_base_msat: chan.get_holder_fee_base_msat(&self.fee_estimator),
return Err(PaymentSendFailure::PathParameterError(path_errs));
}
- let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let cur_height = self.best_block.read().unwrap().height() + 1;
let mut results = Vec::new();
for path in route.paths.iter() {
results.push(self.send_payment_along_path(&path, &payment_hash, payment_secret, total_value, cur_height));
}
}
- /// Call this upon creation of a funding transaction for the given channel.
- ///
- /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
- /// or your counterparty can steal your funds!
- ///
- /// Panics if a funding transaction has already been provided for this channel.
- ///
- /// May panic if the funding_txo is duplicative with some other channel (note that this should
- /// be trivially prevented by using unique funding transaction keys per-channel).
- pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-
+ /// Handles the generation of a funding transaction, optionally (for tests) with a function
+ /// which checks the correctness of the funding transaction given the associated channel.
+ fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>
+ (&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, find_funding_output: FundingOutput) -> Result<(), APIError> {
let (chan, msg) = {
let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
Some(mut chan) => {
- (chan.get_outbound_funding_created(funding_txo, &self.logger)
+ let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+ (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
.map_err(|e| if let ChannelError::Close(msg) = e {
MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(true), None)
} else { unreachable!(); })
, chan)
},
- None => return
+ None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
};
match handle_error!(self, res, chan.get_counterparty_node_id()) {
Ok(funding_msg) => {
(chan, funding_msg)
},
- Err(_) => { return; }
+ Err(_) => { return Err(APIError::ChannelUnavailable {
+ err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+ }) },
}
};
e.insert(chan);
}
}
+ Ok(())
+ }
+
+ #[cfg(test)]
+ pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
+ self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |_, tx| {
+ Ok(OutPoint { txid: tx.txid(), index: output_index })
+ })
+ }
+
+ /// Call this upon creation of a funding transaction for the given channel.
+ ///
+ /// Returns an [`APIError::APIMisuseError`] if the funding_transaction spends non-SegWit outputs
+ /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
+ ///
+ /// Panics if a funding transaction has already been provided for this channel.
+ ///
+ /// May panic if the output found in the funding transaction is duplicative with some other
+ /// channel (note that this should be trivially prevented by using unique funding transaction
+ /// keys per-channel).
+ ///
+ /// Do NOT broadcast the funding transaction yourself. When we have safely received our
+ /// counterparty's signature the funding transaction will automatically be broadcast via the
+ /// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
+ ///
+ /// Note that this includes RBF or similar transaction replacement strategies - lightning does
+ /// not currently support replacing a funding transaction on an existing channel. Instead,
+ /// create a new channel with a conflicting funding transaction.
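+ ///
+ /// A hedged usage sketch, assuming `funding_tx` was built to pay the script and value given
+ /// in the [`Event::FundingGenerationReady`] for `temporary_channel_id`:
+ ///
+ /// ```ignore
+ /// channel_manager.funding_transaction_generated(&temporary_channel_id, funding_tx)?;
+ /// // Note: we do NOT broadcast funding_tx ourselves; it is broadcast automatically once
+ /// // the counterparty's funding_signed arrives.
+ /// ```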
+ pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
+ let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+ for inp in funding_transaction.input.iter() {
+ if inp.witness.is_empty() {
+ return Err(APIError::APIMisuseError {
+ err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
+ });
+ }
+ }
+ self.funding_transaction_generated_intern(temporary_channel_id, funding_transaction, |chan, tx| {
+ let mut output_index = None;
+ let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
+ for (idx, outp) in tx.output.iter().enumerate() {
+ if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
+ if output_index.is_some() {
+ return Err(APIError::APIMisuseError {
+ err: "Multiple outputs matched the expected script and value".to_owned()
+ });
+ }
+ if idx > u16::max_value() as usize {
+ return Err(APIError::APIMisuseError {
+ err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
+ });
+ }
+ output_index = Some(idx as u16);
+ }
+ }
+ if output_index.is_none() {
+ return Err(APIError::APIMisuseError {
+ err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
+ });
+ }
+ Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() })
+ })
}
- fn get_announcement_sigs(&self, chan: &Channel<ChanSigner>) -> Option<msgs::AnnouncementSignatures> {
+ fn get_announcement_sigs(&self, chan: &Channel<Signer>) -> Option<msgs::AnnouncementSignatures> {
if !chan.should_announce() {
log_trace!(self.logger, "Can't send announcement_signatures for private channel {}", log_bytes!(chan.channel_id()));
return None
routing: PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry },
incoming_shared_secret, payment_hash, amt_to_forward, .. },
prev_funding_outpoint } => {
- let prev_hop = HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- };
-
- let mut total_value = 0;
- let payment_secret_opt =
- if let &Some(ref data) = &payment_data { Some(data.payment_secret.clone()) } else { None };
- let htlcs = channel_state.claimable_htlcs.entry((payment_hash, payment_secret_opt))
- .or_insert(Vec::new());
- htlcs.push(ClaimableHTLC {
- prev_hop,
+ let claimable_htlc = ClaimableHTLC {
+ prev_hop: HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ },
value: amt_to_forward,
payment_data: payment_data.clone(),
cltv_expiry: incoming_cltv_expiry,
- });
- if let &Some(ref data) = &payment_data {
- for htlc in htlcs.iter() {
- total_value += htlc.value;
- if htlc.payment_data.as_ref().unwrap().total_msat != data.total_msat {
- total_value = msgs::MAX_VALUE_MSAT;
- }
- if total_value >= msgs::MAX_VALUE_MSAT { break; }
- }
- if total_value >= msgs::MAX_VALUE_MSAT || total_value > data.total_msat {
- for htlc in htlcs.iter() {
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(
- &byte_utils::be32_to_array(
- self.latest_block_height.load(Ordering::Acquire)
- as u32,
- ),
- );
- failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: htlc.prev_hop.short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: htlc.prev_hop.htlc_id,
- incoming_packet_shared_secret: htlc.prev_hop.incoming_packet_shared_secret,
- }), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
- ));
- }
- } else if total_value == data.total_msat {
- new_events.push(events::Event::PaymentReceived {
- payment_hash,
- payment_secret: Some(data.payment_secret),
- amt: total_value,
- });
+ };
+
+ macro_rules! fail_htlc {
+ ($htlc: expr) => {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(
+ &byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
+ );
+ failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: $htlc.prev_hop.short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: $htlc.prev_hop.htlc_id,
+ incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
+ }), payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+ ));
}
- } else {
- new_events.push(events::Event::PaymentReceived {
- payment_hash,
- payment_secret: None,
- amt: amt_to_forward,
- });
}
+
+ // Check that the payment hash and secret are known. Note that we
+ // MUST take care to handle the "unknown payment hash" and
+ // "incorrect payment secret" cases here identically or we'd expose
+ // that we are the ultimate recipient of the given payment hash.
+ // Further, we must not expose whether we have any other HTLCs
+ // associated with the same payment_hash pending or not.
+ let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
+ match payment_secrets.entry(payment_hash) {
+ hash_map::Entry::Vacant(_) => {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we didn't have a corresponding inbound payment.", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ },
+ hash_map::Entry::Occupied(inbound_payment) => {
+ if inbound_payment.get().payment_secret != payment_data.payment_secret {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
+ fail_htlc!(claimable_htlc);
+ } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
+ log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
+ fail_htlc!(claimable_htlc);
+ } else {
+ let mut total_value = 0;
+ let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
+ .or_insert(Vec::new());
+ htlcs.push(claimable_htlc);
+ for htlc in htlcs.iter() {
+ total_value += htlc.value;
+ if htlc.payment_data.total_msat != payment_data.total_msat {
+ log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
+ log_bytes!(payment_hash.0), payment_data.total_msat, htlc.payment_data.total_msat);
+ total_value = msgs::MAX_VALUE_MSAT;
+ }
+ if total_value >= msgs::MAX_VALUE_MSAT { break; }
+ }
+ if total_value >= msgs::MAX_VALUE_MSAT || total_value > payment_data.total_msat {
+ log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
+ log_bytes!(payment_hash.0), total_value, payment_data.total_msat);
+ for htlc in htlcs.iter() {
+ fail_htlc!(htlc);
+ }
+ } else if total_value == payment_data.total_msat {
+ new_events.push(events::Event::PaymentReceived {
+ payment_hash,
+ payment_preimage: inbound_payment.get().payment_preimage,
+ payment_secret: payment_data.payment_secret,
+ amt: total_value,
+ user_payment_id: inbound_payment.get().user_payment_id,
+ });
+ // Only ever generate at most one PaymentReceived
+ // per registered payment_hash, even if it isn't
+ // claimed.
+ inbound_payment.remove_entry();
+ } else {
+ // Nothing to do - we haven't reached the total
+ // payment value yet, wait until we receive more
+ // MPP parts.
+ }
+ }
+ },
+ };
},
HTLCForwardInfo::AddHTLC { .. } => {
panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
events.append(&mut new_events);
}
+ /// Free the background events, generally called from timer_tick_occurred.
+ ///
+ /// Exposed for testing to allow us to process events quickly without generating accidental
+ /// BroadcastChannelUpdate events in timer_tick_occurred.
+ ///
+ /// Expects the caller to have a total_consistency_lock read lock.
+ fn process_background_events(&self) {
+ let mut background_events = Vec::new();
+ mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
+ for event in background_events.drain(..) {
+ match event {
+ BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
+ // The channel has already been closed, so no use bothering to care about the
+ // monitor updating completing.
+ let _ = self.chain_monitor.update_channel(funding_txo, update);
+ },
+ }
+ }
+ }
+
+ #[cfg(any(test, feature = "_test_utils"))]
+ pub(crate) fn test_process_background_events(&self) {
+ self.process_background_events();
+ }
+
/// If a peer is disconnected we mark any channels with that peer as 'disabled'.
/// After some time, if channels are still disabled we need to broadcast a ChannelUpdate
/// to inform the network about the uselessness of these channels.
///
/// This method handles all the details, and must be called roughly once per minute.
- pub fn timer_chan_freshness_every_min(&self) {
+ ///
+ /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
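+ ///
+ /// A hedged driver sketch (assuming a dedicated background thread holding an Arc to this
+ /// ChannelManager):
+ ///
+ /// ```ignore
+ /// loop {
+ ///     std::thread::sleep(std::time::Duration::from_secs(60));
+ ///     channel_manager.timer_tick_occurred();
+ /// }
+ /// ```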
+ pub fn timer_tick_occurred(&self) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ self.process_background_events();
+
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
for (_, chan) in channel_state.by_id.iter_mut() {
/// along the path (including in our own channel on which we received it).
/// Returns false if no payment was found to fail backwards, true if the process of failing the
/// HTLC backwards has been started.
- pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>) -> bool {
+ pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(*payment_hash, *payment_secret));
+ let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
if let Some(mut sources) = removed_source {
for htlc in sources.drain(..) {
if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
- self.latest_block_height.load(Ordering::Acquire) as u32,
- ));
+ self.best_block.read().unwrap().height()));
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
/// to fail and take the channel_state lock for each iteration (as we take ownership and may
/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
/// still-available channels.
- fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<ChanSigner>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+ fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
//identify whether we sent it or not based on the (I presume) very different runtime
//between the branches here. We should make this async and move it into the forward HTLCs
//timer handling.
+
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // from block_connected which may run during initialization prior to the chain_monitor
+ // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
HTLCSource::OutboundRoute { ref path, .. } => {
log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
/// generating message events for the net layer to claim the payment, if possible. Thus, you
/// should probably kick the net layer to go send messages if this returns true!
///
- /// You must specify the expected amounts for this HTLC, and we will only claim HTLCs
- /// available within a few percent of the expected amount. This is critical for several
- /// reasons : a) it avoids providing senders with `proof-of-payment` (in the form of the
- /// payment_preimage without having provided the full value and b) it avoids certain
- /// privacy-breaking recipient-probing attacks which may reveal payment activity to
- /// motivated attackers.
- ///
- /// Note that the privacy concerns in (b) are not relevant in payments with a payment_secret
- /// set. Thus, for such payments we will claim any payments which do not under-pay.
+ /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
+ /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
+ /// event matches your expectation. If you fail to do so and call this method, you may provide
+ /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
///
/// May panic if called except in response to a PaymentReceived event.
- pub fn claim_funds(&self, payment_preimage: PaymentPreimage, payment_secret: &Option<PaymentSecret>, expected_amount: u64) -> bool {
+ ///
+ /// [`create_inbound_payment`]: Self::create_inbound_payment
+ /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
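+ ///
+ /// A hedged sketch of handling a `PaymentReceived` event, assuming `payment_preimage` is
+ /// known (from our own storage or the event's `payment_preimage` field) and
+ /// `expected_amt_msat` is our own bookkeeping for this `payment_hash`:
+ ///
+ /// ```ignore
+ /// if amt >= expected_amt_msat {
+ ///     assert!(channel_manager.claim_funds(payment_preimage));
+ /// } else {
+ ///     channel_manager.fail_htlc_backwards(&payment_hash);
+ /// }
+ /// ```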
+ pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state = Some(self.channel_state.lock().unwrap());
- let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&(payment_hash, *payment_secret));
+ let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
if let Some(mut sources) = removed_source {
assert!(!sources.is_empty());
// we got all the HTLCs and then a channel closed while we were waiting for the user to
// provide the preimage, so worrying too much about the optimal handling isn't worth
// it.
-
- let (is_mpp, mut valid_mpp) = if let &Some(ref data) = &sources[0].payment_data {
- assert!(payment_secret.is_some());
- (true, data.total_msat >= expected_amount)
- } else {
- assert!(payment_secret.is_none());
- (false, false)
- };
-
+ let mut valid_mpp = true;
for htlc in sources.iter() {
- if !is_mpp || !valid_mpp { break; }
if let None = channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id) {
valid_mpp = false;
+ break;
}
}
let mut errs = Vec::new();
let mut claimed_any_htlcs = false;
for htlc in sources.drain(..) {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- if (is_mpp && !valid_mpp) || (!is_mpp && (htlc.value < expected_amount || htlc.value > expected_amount * 2)) {
+ if !valid_mpp {
+ if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
- self.latest_block_height.load(Ordering::Acquire) as u32,
- ));
+ self.best_block.read().unwrap().height()));
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
claimed_any_htlcs = true;
} else { errs.push(e); }
},
- Err(None) if is_mpp => unreachable!("We already checked for channel existence, we can't fail here!"),
- Err(None) => {
- log_warn!(self.logger, "Channel we expected to claim an HTLC from was closed.");
- },
+ Err(None) => unreachable!("We already checked for channel existence, we can't fail here!"),
Ok(()) => claimed_any_htlcs = true,
}
}
} else { false }
}
- fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<ChanSigner>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
+ fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> Result<(), Option<(PublicKey, MsgHandleErrInternal)>> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
let channel_state = &mut **channel_state_lock;
let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
} else { unreachable!(); }
}
- fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<ChanSigner>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+ fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
match source {
HTLCSource::OutboundRoute { .. } => {
mem::drop(channel_state_lock);
/// Gets the node_id held by this ChannelManager
pub fn get_our_node_id(&self) -> PublicKey {
- PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
+ self.our_network_pubkey.clone()
}
/// Restores a single, given channel to normal operation after a
return;
}
- let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored(&self.logger);
+ let (raa, commitment_update, order, pending_forwards, mut pending_failures, funding_broadcastable, funding_locked) = channel.monitor_updating_restored(&self.logger);
if !pending_forwards.is_empty() {
htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
}
handle_cs!();
},
}
- if needs_broadcast_safe {
- pending_events.push(events::Event::FundingBroadcastSafe {
- funding_txo: channel.get_funding_txo().unwrap(),
- user_channel_id: channel.get_user_id(),
- });
+ if let Some(tx) = funding_broadcastable {
+ self.tx_broadcaster.broadcast_transaction(&tx);
}
if let Some(msg) = funding_locked {
pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
let ((funding_msg, monitor), mut chan) = {
+ let best_block = *self.best_block.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
}
- (try_chan_entry!(self, chan.get_mut().funding_created(msg, &self.logger), channel_state, chan), chan.remove())
+ (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
}
// We do not do a force-close here as that would generate a monitor update for
// a monitor that we didn't manage to store (and that we don't care about - we
// don't respond with the funding_signed so the channel can never go on chain).
- let (_funding_txo_option, _monitor_update, failed_htlcs) = chan.force_shutdown(true);
+ let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
assert!(failed_htlcs.is_empty());
return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
},
}
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
- let (funding_txo, user_id) = {
+ let funding_tx = {
+ let best_block = *self.best_block.read().unwrap();
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
match channel_state.by_id.entry(msg.channel_id) {
if chan.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let monitor = match chan.get_mut().funding_signed(&msg, &self.logger) {
+ let (monitor, funding_tx) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
Ok(update) => update,
Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
};
if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
}
- (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
+ funding_tx
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::FundingBroadcastSafe {
- funding_txo,
- user_channel_id: user_id,
- });
+ self.tx_broadcaster.broadcast_transaction(&funding_tx);
Ok(())
}
}
}
- fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
+ fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
let (mut dropped_htlcs, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &msg), channel_state, chan_entry);
+ let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.fee_estimator, &their_features, &msg), channel_state, chan_entry);
if let Some(msg) = shutdown {
channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: counterparty_node_id.clone(),
return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- let create_pending_htlc_status = |chan: &Channel<ChanSigner>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ let create_pending_htlc_status = |chan: &Channel<Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
// Ensure error_code has the UPDATE flag set, since by default we send a
// channel update along as part of failing the HTLC.
assert!((error_code & 0x1000) != 0);
Ok(())
}
+ fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
+ Some(chan_id) => chan_id.clone(),
+ None => {
+ // It's not a local channel
+ return Ok(())
+ }
+ };
+ match channel_state.by_id.entry(chan_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+ // TODO: see issue #153, we need consistent handling of obnoxious behavior from a random node
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), chan_id));
+ }
+ try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
+ },
+ hash_map::Entry::Vacant(_) => unreachable!()
+ }
+ Ok(())
+ }
+
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
msg: update
});
}
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ },
+ });
}
},
}
self.finish_force_close_channel(failure);
}
}
+
+ /// Handle a list of channel failures during a block_connected or block_disconnected call,
+ /// pushing the channel monitor update (if any) to the background events queue and removing the
+ /// Channel object.
+ fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
+ for mut failure in failed_channels.drain(..) {
+ // Either a commitment transaction has been confirmed on-chain or
+ // Channel::block_disconnected detected that the funding transaction has been
+ // reorganized out of the main chain.
+ // We cannot broadcast our latest local state via monitor update (as
+ // Channel::force_shutdown tries to make us do) as we may still be in initialization,
+ // so we track the update internally and handle it when the user next calls
+ // timer_tick_occurred, guaranteeing we're running normally.
+ if let Some((funding_txo, update)) = failure.0.take() {
+ assert_eq!(update.updates.len(), 1);
+ if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
+ assert!(should_broadcast);
+ } else { unreachable!(); }
+ self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
+ }
+ self.finish_force_close_channel(failure);
+ }
+ }
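To make the queue-then-drain pattern above concrete, here is a minimal self-contained sketch; `Manager`, `QueuedEvent` and `MonitorUpdate` are hypothetical stand-ins for `ChannelManager`, `BackgroundEvent` and `ChannelMonitorUpdate`:

```rust
use std::sync::Mutex;

struct MonitorUpdate { id: u64 }
enum QueuedEvent { ClosingMonitorUpdate(MonitorUpdate) }

struct Manager {
	pending_background_events: Mutex<Vec<QueuedEvent>>,
}

impl Manager {
	// Init-unsafe path (e.g. block replay during startup): queue the update
	// rather than applying it against a possibly-unconfigured watcher.
	fn defer_closing_update(&self, update: MonitorUpdate) {
		self.pending_background_events.lock().unwrap()
			.push(QueuedEvent::ClosingMonitorUpdate(update));
	}

	// Known-safe context (the real code drains in timer_tick_occurred):
	fn process_background_events(&self) {
		let events = self.pending_background_events.lock().unwrap().split_off(0);
		for event in events {
			let QueuedEvent::ClosingMonitorUpdate(update) = event;
			// Apply the deferred update here.
			let _ = update.id;
		}
	}
}
```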
+
+ fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
+ assert!(invoice_expiry_delta_secs <= 60*60*24*365); // Sadly bitcoin timestamps are u32s, so panic before 2106
+
+ let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
+
+ let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
+ match payment_secrets.entry(payment_hash) {
+ hash_map::Entry::Vacant(e) => {
+ e.insert(PendingInboundPayment {
+ payment_secret, min_value_msat, user_payment_id, payment_preimage,
+ // We assume that highest_seen_timestamp is pretty close to the current time -
+ // it's updated when we receive a new block with the maximum time we've seen in
+ // a header. It should never be more than two hours in the future.
+ // Thus, we add two hours here as a buffer to ensure we absolutely
+ // never fail a payment too early.
+ // Note that we assume that received blocks have reasonably up-to-date
+ // timestamps.
+ expiry_time: self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + invoice_expiry_delta_secs as u64 + 7200,
+ });
+ },
+ hash_map::Entry::Occupied(_) => return Err(APIError::APIMisuseError { err: "Duplicate payment hash".to_owned() }),
+ }
+ Ok(payment_secret)
+ }
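A tiny worked example of the `expiry_time` arithmetic above, using assumed input values:

```rust
#[test]
fn expiry_buffer_math() {
	// Assumed values, for illustration only.
	let highest_seen_timestamp: u64 = 1_600_000_000; // latest header time seen
	let invoice_expiry_delta_secs: u64 = 3_600;      // invoice valid one hour

	// Header timestamps may be up to two hours in the future, so the 7200s
	// buffer can only delay expiry; it never fails a payment too early.
	let expiry_time = highest_seen_timestamp + invoice_expiry_delta_secs + 7200;
	assert_eq!(expiry_time, 1_600_010_800);
}
```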
+
+ /// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
+ /// to pay us.
+ ///
+ /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
+ /// [`PaymentHash`] and [`PaymentPreimage`] for you, returning the first and storing the second.
+ ///
+ /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentReceived`] event,
+ /// which will have the [`PaymentReceived::payment_preimage`] field filled in. That should then be
+ /// passed directly to [`claim_funds`].
+ ///
+ /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
+ ///
+ /// [`claim_funds`]: Self::claim_funds
+ /// [`PaymentReceived`]: events::Event::PaymentReceived
+ /// [`PaymentReceived::payment_preimage`]: events::Event::PaymentReceived::payment_preimage
+ /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
+ pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> (PaymentHash, PaymentSecret) {
+ let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes());
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+
+ (payment_hash,
+ self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs, user_payment_id)
+ .expect("RNG Generated Duplicate PaymentHash"))
+ }
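A usage sketch for the method above, assuming `channel_manager` is a fully initialized `ChannelManager`; invoice encoding and event handling are elided:

```rust
let (payment_hash, payment_secret) = channel_manager.create_inbound_payment(
	Some(10_000), // require at least 10,000 msat before PaymentReceived fires
	3600,         // accept payment up to roughly an hour past the latest header time
	42,           // user_payment_id, echoed back in the PaymentReceived event
);
// Encode `payment_hash` and `payment_secret` into the invoice handed to the
// payer. When the matching PaymentReceived event arrives, pass its
// payment_preimage straight to `channel_manager.claim_funds(..)`.
```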
+
+ /// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
+ /// stored external to LDK.
+ ///
+ /// A [`PaymentReceived`] event will only be generated if the [`PaymentSecret`] matches a
+ /// payment secret fetched via this method or [`create_inbound_payment`], and which is at least
+ /// the `min_value_msat` provided here, if one is provided.
+ ///
+ /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) must be globally unique. This
+ /// method may return an Err if another payment with the same payment_hash is still pending.
+ ///
+ /// `user_payment_id` will be provided back in [`PaymentReceived::user_payment_id`] events to
+ /// allow tracking of which events correspond with which calls to this and
+ /// [`create_inbound_payment`]. `user_payment_id` has no meaning inside of LDK; it is simply
+ /// copied to events and otherwise ignored. It may be used to correlate PaymentReceived events
+ /// with invoice metadata stored elsewhere.
+ ///
+ /// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
+ /// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
+ /// before a [`PaymentReceived`] event will be generated, ensuring that we do not provide the
+ /// sender "proof-of-payment" unless they have paid the required amount.
+ ///
+ /// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
+ /// in excess of the current time. This should roughly match the expiry time set in the invoice.
+ /// After this many seconds, we will remove the inbound payment, resulting in any attempts to
+ /// pay the invoice failing. The BOLT spec suggests 7,200 secs as a default validity time for
+ /// invoices when no timeout is set.
+ ///
+ /// Note that we use block header time to time-out pending inbound payments (with some margin
+ /// to compensate for the inaccuracy of block header timestamps). Thus, in practice we will
+ /// accept a payment and generate a [`PaymentReceived`] event for some time after the expiry.
+ /// If you need exact expiry semantics, you should enforce them upon receipt of
+ /// [`PaymentReceived`].
+ ///
+ /// May panic if `invoice_expiry_delta_secs` is greater than one year.
+ ///
+ /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry`
+ /// set to at least [`MIN_FINAL_CLTV_EXPIRY`].
+ ///
+ /// [`create_inbound_payment`]: Self::create_inbound_payment
+ /// [`PaymentReceived`]: events::Event::PaymentReceived
+ /// [`PaymentReceived::user_payment_id`]: events::Event::PaymentReceived::user_payment_id
+ pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
+ self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
+ }
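A companion sketch for the externally-stored-preimage flow, again assuming `channel_manager` is in scope; the preimage lives in your own storage and only the hash is registered with LDK:

```rust
use bitcoin::hashes::{Hash, sha256::Hash as Sha256};

// Illustrative only: the preimage is held outside LDK (e.g. in your own
// database) and only its hash is handed to the ChannelManager.
let external_preimage = PaymentPreimage([1; 32]);
let payment_hash = PaymentHash(Sha256::hash(&external_preimage.0).into_inner());

let payment_secret = channel_manager
	.create_inbound_payment_for_hash(payment_hash, Some(10_000), 3600, 42)
	.expect("a payment with this hash was already pending");
// Later, on PaymentReceived for this hash, look up the preimage externally
// and call `channel_manager.claim_funds(external_preimage)`.
```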
}
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
- where M::Target: chain::Watch<Keys=ChanSigner>,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
}
}
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<ChanSigner, M, T, K, F, L>
- where M::Target: chain::Watch<Keys=ChanSigner>,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
}
}
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<ChanSigner, M, T, K, F, L>
- where M::Target: chain::Watch<Keys=ChanSigner>,
- T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
- F::Target: FeeEstimator,
- L::Target: Logger,
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Listen for ChannelManager<Signer, M, T, K, F, L>
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- /// Updates channel state based on transactions seen in a connected block.
- pub fn block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
- let header_hash = header.block_hash();
- log_trace!(self.logger, "Block {} at height {} connected", header_hash, height);
+ fn block_connected(&self, block: &Block, height: u32) {
+ {
+ let best_block = self.best_block.read().unwrap();
+ assert_eq!(best_block.block_hash(), block.header.prev_blockhash,
+ "Blocks must be connected in chain-order - the connected header must build on the last connected header");
+ assert_eq!(best_block.height(), height - 1,
+ "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
+ }
+
+ let txdata: Vec<_> = block.txdata.iter().enumerate().collect();
+ self.transactions_confirmed(&block.header, &txdata, height);
+ self.best_block_updated(&block.header, height);
+ }
+
+ fn block_disconnected(&self, header: &BlockHeader, height: u32) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let new_height = height - 1;
+ {
+ let mut best_block = self.best_block.write().unwrap();
+ assert_eq!(best_block.block_hash(), header.block_hash(),
+ "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
+ assert_eq!(best_block.height(), height,
+ "Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
+ *best_block = BestBlock::new(header.prev_blockhash, new_height)
+ }
+
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time));
+ }
+}
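A usage sketch of the `chain::Listen` implementation above; `blocks`, `stale_header` and `stale_height` are assumed inputs from your own block source:

```rust
use chain::Listen;

// `blocks` is assumed to yield (Block, u32) pairs in chain order, starting
// one block past the manager's current best block.
for (block, height) in blocks {
	// Order matters: the assertions above require each connected block to
	// build directly on the previously connected header.
	Listen::block_connected(&channel_manager, &block, height);
}

// On a reorg, disconnect stale blocks newest-first back to the fork point:
Listen::block_disconnected(&channel_manager, &stale_header, stale_height);
```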
+
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<Signer, M, T, K, F, L>
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+{
+ fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // during initialization prior to the chain_monitor being fully configured in some cases.
+ // See the docs for `ChannelManagerReadArgs` for more.
+
+ let block_hash = header.block_hash();
+ log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
+
+ let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
+ }
+
+ fn best_block_updated(&self, header: &BlockHeader, height: u32) {
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // during initialization prior to the chain_monitor being fully configured in some cases.
+ // See the docs for `ChannelManagerReadArgs` for more.
+
+ let block_hash = header.block_hash();
+ log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
+
+ let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+
+ *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
+
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time));
+
+ macro_rules! max_time {
+ ($timestamp: expr) => {
+ loop {
+ // Update $timestamp to be the max of its current value and the block
+ // timestamp. This should keep us close to the current time without relying on
+ // having an explicit local time source.
+ // Just in case we end up in a race, we loop until we either successfully
+ // update $timestamp or decide we don't need to.
+ let old_serial = $timestamp.load(Ordering::Acquire);
+ if old_serial >= header.time as usize { break; }
+ if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
+ break;
+ }
+ }
+ }
+ }
+ max_time!(self.last_node_announcement_serial);
+ max_time!(self.highest_seen_timestamp);
+ let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
+ payment_secrets.retain(|_, inbound_payment| {
+ inbound_payment.expiry_time > header.time as u64
+ });
+ }
+
+ fn get_relevant_txids(&self) -> Vec<Txid> {
+ let channel_state = self.channel_state.lock().unwrap();
+ let mut res = Vec::with_capacity(channel_state.short_to_id.len());
+ for chan in channel_state.by_id.values() {
+ if let Some(funding_txo) = chan.get_funding_txo() {
+ res.push(funding_txo.txid);
+ }
+ }
+ res
+ }
+
+ fn transaction_unconfirmed(&self, txid: &Txid) {
+ let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ self.do_chain_event(None, |channel| {
+ if let Some(funding_txo) = channel.get_funding_txo() {
+ if funding_txo.txid == *txid {
+ channel.funding_transaction_unconfirmed().map(|_| (None, Vec::new()))
+ } else { Ok((None, Vec::new())) }
+ } else { Ok((None, Vec::new())) }
+ });
+ }
+}
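A sketch of how the `chain::Confirm` methods above fit together for transaction-oriented (e.g. Electrum-style) sync; `chain_source` and its `get_confirmation` method are hypothetical stand-ins for however your application queries the chain:

```rust
use chain::Confirm;

for txid in channel_manager.get_relevant_txids() {
	match chain_source.get_confirmation(&txid) {
		// Newly confirmed: report the containing block's header, the indexed
		// transactions, and the block height.
		Some((header, txdata, height)) =>
			channel_manager.transactions_confirmed(&header, &txdata, height),
		// Previously confirmed but no longer in the best chain: unconfirm it.
		None => channel_manager.transaction_unconfirmed(&txid),
	}
}
// Always finish by reporting the current tip so height-based timers can fire.
channel_manager.best_block_updated(&tip_header, tip_height);
```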
+
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
+{
+ /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
+ /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
+ /// the function.
+ fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>), msgs::ErrorMessage>>
+ (&self, height_opt: Option<u32>, f: FN) {
+ // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
+ // during initialization prior to the chain_monitor being fully configured in some cases.
+ // See the docs for `ChannelManagerReadArgs` for more.
+
let mut failed_channels = Vec::new();
let mut timed_out_htlcs = Vec::new();
{
let short_to_id = &mut channel_state.short_to_id;
let pending_msg_events = &mut channel_state.pending_msg_events;
channel_state.by_id.retain(|_, channel| {
- let res = channel.block_connected(header, txdata, height);
+ let res = f(channel);
if let Ok((chan_res, mut timed_out_pending_htlcs)) = res {
for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
let chan_update = self.get_channel_update(&channel).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
}
} else if let Err(e) = res {
+ if let Some(short_id) = channel.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ // It looks like our counterparty went on-chain or the funding transaction was
+ // reorged out of the main chain. Close the channel.
+ failed_channels.push(channel.force_shutdown(true));
+ if let Ok(update) = self.get_channel_update(&channel) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: channel.get_counterparty_node_id(),
action: msgs::ErrorAction::SendErrorMessage { msg: e },
});
return false;
}
- if let Some(funding_txo) = channel.get_funding_txo() {
- for &(_, tx) in txdata.iter() {
- for inp in tx.input.iter() {
- if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
- log_trace!(self.logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
- if let Some(short_id) = channel.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- // It looks like our counterparty went on-chain. We go ahead and
- // broadcast our latest local state as well here, just in case its
- // some kind of SPV attack, though we expect these to be dropped.
- failed_channels.push(channel.force_shutdown(true));
- if let Ok(update) = self.get_channel_update(&channel) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- return false;
- }
- }
- }
- }
true
});
- channel_state.claimable_htlcs.retain(|&(ref payment_hash, _), htlcs| {
- htlcs.retain(|htlc| {
- // If height is approaching the number of blocks we think it takes us to get
- // our commitment transaction confirmed before the HTLC expires, plus the
- // number of blocks we generally consider it to take to do a commitment update,
- // just give up on it and fail the HTLC.
- if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
- let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
- htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
- timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
- failure_code: 0x4000 | 15,
- data: htlc_msat_height_data
- }));
- false
- } else { true }
+ if let Some(height) = height_opt {
+ channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
+ htlcs.retain(|htlc| {
+ // If the current height is within HTLC_FAIL_BACK_BUFFER blocks of this HTLC's
+ // cltv_expiry (i.e. the number of blocks we expect it takes to confirm our
+ // commitment transaction, plus the number of blocks we generally allow for a
+ // commitment update), give up on the HTLC and fail it back.
+ if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+ let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+ htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+ timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+ failure_code: 0x4000 | 15,
+ data: htlc_msat_height_data
+ }));
+ false
+ } else { true }
+ });
+ !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
});
- !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
- });
- }
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
+ }
}
+ self.handle_init_event_channel_failures(failed_channels);
+
for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
}
- self.latest_block_height.store(height as usize, Ordering::Release);
- *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
- loop {
- // Update last_node_announcement_serial to be the max of its current value and the
- // block timestamp. This should keep us close to the current time without relying on
- // having an explicit local time source.
- // Just in case we end up in a race, we loop until we either successfully update
- // last_node_announcement_serial or decide we don't need to.
- let old_serial = self.last_node_announcement_serial.load(Ordering::Acquire);
- if old_serial >= header.time as usize { break; }
- if self.last_node_announcement_serial.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
- break;
- }
- }
- }
-
- /// Updates channel state based on a disconnected block.
- ///
- /// If necessary, the channel may be force-closed without letting the counterparty participate
- /// in the shutdown.
- pub fn block_disconnected(&self, header: &BlockHeader) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- let mut failed_channels = Vec::new();
- {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- channel_state.by_id.retain(|_, v| {
- if v.block_disconnected(header) {
- if let Some(short_id) = v.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- failed_channels.push(v.force_shutdown(true));
- if let Ok(update) = self.get_channel_update(&v) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- false
- } else {
- true
- }
- });
- }
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
- }
- self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
- *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.block_hash();
}
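The HTLC retain logic above hinges on a single height check; here is a standalone sketch of that predicate, with assumed numeric values (the real buffer is `HTLC_FAIL_BACK_BUFFER`):

```rust
/// Fail an HTLC back once the chain tip is within `fail_back_buffer` blocks
/// of its expiry (the role HTLC_FAIL_BACK_BUFFER plays above).
fn should_fail_back(height: u32, cltv_expiry: u32, fail_back_buffer: u32) -> bool {
	height >= cltv_expiry - fail_back_buffer
}

#[test]
fn fail_back_boundary() {
	assert!(should_fail_back(697, 700, 3));   // at the boundary: give up early
	assert!(!should_fail_back(650, 700, 3));  // still plenty of blocks left
}
```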
/// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
- /// indicating whether persistence is necessary. Only one listener on `wait_timeout` is
- /// guaranteed to be woken up.
+ /// indicating whether persistence is necessary. Only one listener on
+ /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+ /// up.
/// Note that the feature `allow_wallclock_use` must be enabled to use this function.
#[cfg(any(test, feature = "allow_wallclock_use"))]
- pub fn wait_timeout(&self, max_wait: Duration) -> bool {
+ pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
self.persistence_notifier.wait_timeout(max_wait)
}
- /// Blocks until ChannelManager needs to be persisted. Only one listener on `wait` is
- /// guaranteed to be woken up.
- pub fn wait(&self) {
+ /// Blocks until ChannelManager needs to be persisted. Only one listener on
+ /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
+ /// up.
+ pub fn await_persistable_update(&self) {
self.persistence_notifier.wait()
}
}
}
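A minimal persister loop built on the renamed methods above, assuming the `ChannelManager` is shared via `Arc` and `persist_manager` is a hypothetical routine that writes `ChannelManager::write`'s output to disk:

```rust
use std::sync::Arc;
use std::thread;

let manager = Arc::clone(&channel_manager);
thread::spawn(move || loop {
	// Parks this thread until some state change requires re-persisting.
	manager.await_persistable_update();
	persist_manager(&*manager).expect("failed to persist ChannelManager");
});
```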
-impl<ChanSigner: ChannelKeys, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
- ChannelMessageHandler for ChannelManager<ChanSigner, M, T, K, F, L>
- where M::Target: chain::Watch<Keys=ChanSigner>,
+impl<Signer: Sign, M: Deref + Sync + Send, T: Deref + Sync + Send, K: Deref + Sync + Send, F: Deref + Sync + Send, L: Deref + Sync + Send>
+ ChannelMessageHandler for ChannelManager<Signer, M, T, K, F, L>
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
}
- fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
+ fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
+ let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, their_features, msg), *counterparty_node_id);
}
fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
}
+ fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
+ let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
+ }
+
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
&events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+ &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
}
});
}
}
/// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
-/// disk/backups, through `wait_timeout` and `wait`.
+/// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`.
struct PersistenceNotifier {
/// Users won't access the persistence_lock directly, but rather wait on its bool using
/// `wait_timeout` and `wait`.
},
&PendingHTLCRouting::Receive { ref payment_data, ref incoming_cltv_expiry } => {
1u8.write(writer)?;
- payment_data.write(writer)?;
+ payment_data.payment_secret.write(writer)?;
+ payment_data.total_msat.write(writer)?;
incoming_cltv_expiry.write(writer)?;
},
}
short_channel_id: Readable::read(reader)?,
},
1u8 => PendingHTLCRouting::Receive {
- payment_data: Readable::read(reader)?,
+ payment_data: msgs::FinalOnionHopData {
+ payment_secret: Readable::read(reader)?,
+ total_msat: Readable::read(reader)?,
+ },
incoming_cltv_expiry: Readable::read(reader)?,
},
_ => return Err(DecodeError::InvalidValue),
incoming_packet_shared_secret
});
-impl_writeable!(ClaimableHTLC, 0, {
- prev_hop,
- value,
- payment_data,
- cltv_expiry
-});
+impl Writeable for ClaimableHTLC {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ self.prev_hop.write(writer)?;
+ self.value.write(writer)?;
+ self.payment_data.payment_secret.write(writer)?;
+ self.payment_data.total_msat.write(writer)?;
+ self.cltv_expiry.write(writer)
+ }
+}
+
+impl Readable for ClaimableHTLC {
+ fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+ Ok(ClaimableHTLC {
+ prev_hop: Readable::read(reader)?,
+ value: Readable::read(reader)?,
+ payment_data: msgs::FinalOnionHopData {
+ payment_secret: Readable::read(reader)?,
+ total_msat: Readable::read(reader)?,
+ },
+ cltv_expiry: Readable::read(reader)?,
+ })
+ }
+}
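The same field-by-field pattern on a hypothetical two-field struct, assuming the `Writeable`/`Readable`/`Writer`/`DecodeError` items from `util::ser` are in scope; the invariant is simply that `read` consumes fields in exactly the order `write` emitted them:

```rust
struct Example {
	a: u64,
	b: u32,
}

impl Writeable for Example {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
		self.a.write(writer)?;
		self.b.write(writer)
	}
}

impl Readable for Example {
	fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		Ok(Example {
			a: Readable::read(reader)?, // must match the write order above
			b: Readable::read(reader)?,
		})
	}
}
```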
impl Writeable for HTLCSource {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
}
}
-impl<ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<ChanSigner, M, T, K, F, L>
- where M::Target: chain::Watch<Keys=ChanSigner>,
+impl_writeable!(PendingInboundPayment, 0, {
+ payment_secret,
+ expiry_time,
+ user_payment_id,
+ payment_preimage,
+ min_value_msat
+});
+
+impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
self.genesis_hash.write(writer)?;
- (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
- self.last_block_hash.lock().unwrap().write(writer)?;
+ {
+ let best_block = self.best_block.read().unwrap();
+ best_block.height().write(writer)?;
+ best_block.block_hash().write(writer)?;
+ }
let channel_state = self.channel_state.lock().unwrap();
let mut unfunded_channels = 0;
event.write(writer)?;
}
+ let background_events = self.pending_background_events.lock().unwrap();
+ (background_events.len() as u64).write(writer)?;
+ for event in background_events.iter() {
+ match event {
+ BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
+ 0u8.write(writer)?;
+ funding_txo.write(writer)?;
+ monitor_update.write(writer)?;
+ },
+ }
+ }
+
(self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
+ (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
+
+ let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+ (pending_inbound_payments.len() as u64).write(writer)?;
+ for (hash, pending_payment) in pending_inbound_payments.iter() {
+ hash.write(writer)?;
+ pending_payment.write(writer)?;
+ }
Ok(())
}
/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
/// is:
/// 1) Deserialize all stored ChannelMonitors.
-/// 2) Deserialize the ChannelManager by filling in this struct and calling <(Sha256dHash,
-/// ChannelManager)>::read(reader, args).
+/// 2) Deserialize the ChannelManager by filling in this struct and calling:
+/// <(BlockHash, ChannelManager)>::read(reader, args)
/// This may result in closing some Channels if the ChannelMonitor is newer than the stored
/// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
-/// ChannelMonitor::get_outputs_to_watch() and ChannelMonitor::get_funding_txo().
+/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
+/// way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
+/// ChannelMonitor::get_funding_txo().
/// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Move the ChannelMonitors into your local chain::Watch.
-/// 6) Disconnect/connect blocks on the ChannelManager.
-pub struct ChannelManagerReadArgs<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- where M::Target: chain::Watch<Keys=ChanSigner>,
+/// 5) Disconnect/connect blocks on the ChannelManager.
+/// 6) Move the ChannelMonitors into your local chain::Watch.
+///
+/// Note that the ordering of #4-6 does not matter; however, all three must occur before you
+/// call any other methods on the newly-deserialized ChannelManager.
+///
+/// Note that because some channels may be closed during deserialization, it is critical that you
+/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
+/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
+/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
+/// not force-close the same channels but consider them live), you may end up revoking a state for
+/// which you've already broadcasted the transaction.
+pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
/// this struct.
///
/// (C-not exported) because we have no HashMap bindings
- pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<ChanSigner>>,
+ pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<Signer>>,
}
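A restart-time sketch of filling this struct and reading the manager back, assuming it runs in a function returning `Result<_, DecodeError>`, that `monitors: Vec<ChannelMonitor<_>>` was deserialized first, and that `reader` wraps the bytes previously produced by `ChannelManager::write`:

```rust
let read_args = ChannelManagerReadArgs::new(
	keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger,
	UserConfig::default(),
	monitors.iter_mut().collect(), // Vec<&mut ChannelMonitor<_>>
);
let (best_block_hash, channel_manager) =
	<(BlockHash, ChannelManager<_, _, _, _, _, _>)>::read(&mut reader, read_args)?;
// Replay blocks since `best_block_hash` on the monitors and the manager
// (steps 4-5 above) before handing the monitors to chain::Watch (step 6).
```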
-impl<'a, ChanSigner: 'a + ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>
- where M::Target: chain::Watch<Keys=ChanSigner>,
+impl<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
/// HashMap for you. This is primarily useful for C bindings where it is not practical to
/// populate a HashMap directly from C.
pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
- mut channel_monitors: Vec<&'a mut ChannelMonitor<ChanSigner>>) -> Self {
+ mut channel_monitors: Vec<&'a mut ChannelMonitor<Signer>>) -> Self {
Self {
keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, default_config,
channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
-impl<'a, ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<ChanSigner, M, T, K, F, L>>)
- where M::Target: chain::Watch<Keys=ChanSigner>,
+impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<Signer, M, T, K, F, L>>)
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result<Self, DecodeError> {
- let (blockhash, chan_manager) = <(BlockHash, ChannelManager<ChanSigner, M, T, K, F, L>)>::read(reader, args)?;
+ fn read<R: ::std::io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
}
-impl<'a, ChanSigner: ChannelKeys, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>> for (BlockHash, ChannelManager<ChanSigner, M, T, K, F, L>)
- where M::Target: chain::Watch<Keys=ChanSigner>,
+impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
+ ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<Signer, M, T, K, F, L>)
+ where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
L::Target: Logger,
{
- fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, ChanSigner, M, T, K, F, L>) -> Result<Self, DecodeError> {
+ fn read<R: ::std::io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
let _ver: u8 = Readable::read(reader)?;
let min_ver: u8 = Readable::read(reader)?;
if min_ver > SERIALIZATION_VERSION {
}
let genesis_hash: BlockHash = Readable::read(reader)?;
- let latest_block_height: u32 = Readable::read(reader)?;
- let last_block_hash: BlockHash = Readable::read(reader)?;
+ let best_block_height: u32 = Readable::read(reader)?;
+ let best_block_hash: BlockHash = Readable::read(reader)?;
let mut failed_htlcs = Vec::new();
let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
for _ in 0..channel_count {
- let mut channel: Channel<ChanSigner> = Channel::read(reader, &args.keys_manager)?;
- if channel.last_block_connected != Default::default() && channel.last_block_connected != last_block_hash {
- return Err(DecodeError::InvalidValue);
- }
-
+ let mut channel: Channel<Signer> = Channel::read(reader, &args.keys_manager)?;
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
// But if the channel is behind of the monitor, close the channel:
- let (_, _, mut new_failed_htlcs) = channel.force_shutdown(true);
+ let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
failed_htlcs.append(&mut new_failed_htlcs);
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
} else {
}
}
+ let background_event_count: u64 = Readable::read(reader)?;
+ let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
+ for _ in 0..background_event_count {
+ match <u8 as Readable>::read(reader)? {
+ 0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
+ _ => return Err(DecodeError::InvalidValue),
+ }
+ }
+
let last_node_announcement_serial: u32 = Readable::read(reader)?;
+ let highest_seen_timestamp: u32 = Readable::read(reader)?;
+
+ let pending_inbound_payment_count: u64 = Readable::read(reader)?;
+ let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
+ for _ in 0..pending_inbound_payment_count {
+ if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
let channel_manager = ChannelManager {
genesis_hash,
chain_monitor: args.chain_monitor,
tx_broadcaster: args.tx_broadcaster,
- latest_block_height: AtomicUsize::new(latest_block_height as usize),
- last_block_hash: Mutex::new(last_block_hash),
- secp_ctx: Secp256k1::new(),
+ best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
channel_state: Mutex::new(ChannelHolder {
by_id,
claimable_htlcs,
pending_msg_events: Vec::new(),
}),
+ pending_inbound_payments: Mutex::new(pending_inbound_payments),
+
our_network_key: args.keys_manager.get_node_secret(),
+ our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
+ secp_ctx,
last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
+ highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
per_peer_state: RwLock::new(per_peer_state),
pending_events: Mutex::new(pending_events_read),
+ pending_background_events: Mutex::new(pending_background_events_read),
total_consistency_lock: RwLock::new(()),
persistence_notifier: PersistenceNotifier::new(),
//TODO: Broadcast channel update for closed channels, but only after we've made a
//connection or two.
- Ok((last_block_hash.clone(), channel_manager))
+ Ok((best_block_hash.clone(), channel_manager))
}
}
}
}
}
+
+#[cfg(all(any(test, feature = "_test_utils"), feature = "unstable"))]
+pub mod bench {
+ use chain::Listen;
+ use chain::chainmonitor::ChainMonitor;
+ use chain::channelmonitor::Persist;
+ use chain::keysinterface::{KeysManager, InMemorySigner};
+ use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
+ use ln::features::{InitFeatures, InvoiceFeatures};
+ use ln::functional_test_utils::*;
+ use ln::msgs::ChannelMessageHandler;
+ use routing::network_graph::NetworkGraph;
+ use routing::router::get_route;
+ use util::test_utils;
+ use util::config::UserConfig;
+ use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+
+ use bitcoin::hashes::Hash;
+ use bitcoin::hashes::sha256::Hash as Sha256;
+ use bitcoin::{Block, BlockHeader, Transaction, TxOut};
+
+ use std::sync::Mutex;
+
+ use test::Bencher;
+
+ struct NodeHolder<'a, P: Persist<InMemorySigner>> {
+ node: &'a ChannelManager<InMemorySigner,
+ &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
+ &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
+ &'a test_utils::TestLogger, &'a P>,
+ &'a test_utils::TestBroadcaster, &'a KeysManager,
+ &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>
+ }
+
+ #[cfg(test)]
+ #[bench]
+ fn bench_sends(bench: &mut Bencher) {
+ bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
+ }
+
+ pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
+ // Do a simple benchmark of sending a payment back and forth between two nodes.
+ // Note that this is unrealistic as each payment send will require at least two fsync
+ // calls per node.
+ let network = bitcoin::Network::Testnet;
+ let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
+
+ let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
+ let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
+
+ let mut config: UserConfig = Default::default();
+ config.own_channel_config.minimum_depth = 1;
+
+ let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
+ let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
+ let seed_a = [1u8; 32];
+ let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
+ let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
+ network,
+ best_block: BestBlock::from_genesis(network),
+ });
+ let node_a_holder = NodeHolder { node: &node_a };
+
+ let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
+ let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
+ let seed_b = [2u8; 32];
+ let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
+ let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
+ network,
+ best_block: BestBlock::from_genesis(network),
+ });
+ let node_b_holder = NodeHolder { node: &node_b };
+
+ node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
+ node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
+ node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
+
+ let tx;
+ if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
+ tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ value: 8_000_000, script_pubkey: output_script,
+ }]};
+ node_a.funding_transaction_generated(&temporary_channel_id, tx.clone()).unwrap();
+ } else { panic!(); }
+
+ node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
+ node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
+
+ assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
+
+ let block = Block {
+ header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
+ txdata: vec![tx],
+ };
+ Listen::block_connected(&node_a, &block, 1);
+ Listen::block_connected(&node_b, &block, 1);
+
+ node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
+ node_b.handle_funding_locked(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingLocked, node_b.get_our_node_id()));
+
+ let dummy_graph = NetworkGraph::new(genesis_hash);
+
+ let mut payment_count: u64 = 0;
+ macro_rules! send_payment {
+ ($node_a: expr, $node_b: expr) => {
+ let usable_channels = $node_a.list_usable_channels();
+ let route = get_route(&$node_a.get_our_node_id(), &dummy_graph, &$node_b.get_our_node_id(), Some(InvoiceFeatures::known()),
+ Some(&usable_channels.iter().collect::<Vec<_>>()), &[], 10_000, TEST_FINAL_CLTV, &logger_a).unwrap();
+
+ let mut payment_preimage = PaymentPreimage([0; 32]);
+ payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
+ payment_count += 1;
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+ let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, 0).unwrap();
+
+ $node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+ let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
+ $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
+ $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
+ let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
+ $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
+ $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
+ $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
+
+ expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
+ expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
+ assert!($node_b.claim_funds(payment_preimage));
+
+ match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
+ MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+ assert_eq!(node_id, $node_a.get_our_node_id());
+ $node_a.handle_update_fulfill_htlc(&$node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+ $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &updates.commitment_signed);
+ },
+ _ => panic!("Failed to generate claim event"),
+ }
+
+ let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
+ $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
+ $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
+ $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
+
+ expect_payment_sent!(NodeHolder { node: &$node_a }, payment_preimage);
+ }
+ }
+
+ bench.iter(|| {
+ send_payment!(node_a, node_b);
+ send_payment!(node_b, node_a);
+ });
+ }
+}