//! Traits and utility impls which allow other parts of rust-lightning to interact with the
-//! blockchain - receiving notifications of new blocks and block disconnections and allowing
-//! rust-lightning to request that you monitor the chain for certain outpoints/transactions.
+//! blockchain.
+//!
+//! Includes traits for monitoring and receiving notifications of new blocks and block
+//! disconnections, transaction broadcasting, and feerate information requests.
use bitcoin::blockdata::block::{Block, BlockHeader};
use bitcoin::blockdata::transaction::Transaction;
/// An interface to request notification of certain scripts as they appear in
/// the chain.
+///
/// Note that all of the functions implemented here *must* be reentrant-safe (obviously - they're
/// called from inside the library in response to ChainListener events, P2P events, or timer
/// events).
/// Note that if a new transaction/outpoint is watched during a block_connected call, the block
/// *must* be re-scanned with the new transaction/outpoints and block_connected should be
/// called again with the same header and (at least) the new transactions.
+ ///
/// Note that if a transaction/outpoint which was already being watched is re-registered during
/// a call, that *must not* trigger a second call.
+ ///
/// This also means those counting confirmations using block_connected callbacks should watch
/// for duplicate headers and not count them towards confirmations!
fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]);
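To illustrate the duplicate-header caveat above, a listener counting confirmations might dedup on block height, since a re-scan redelivers the same header and height (a minimal sketch; `ConfirmationCounter` is hypothetical):

```rust
use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;

/// Hypothetical listener counting confirmations of a watched transaction.
struct ConfirmationCounter {
    last_height: Option<u32>,
    confirmations: u32,
}

impl ConfirmationCounter {
    fn block_connected(&mut self, _header: &BlockHeader, height: u32,
                       _txn_matched: &[&Transaction], _indexes: &[u32]) {
        // Re-scan calls repeat the same header/height: don't count them twice.
        if self.last_height == Some(height) { return; }
        self.last_height = Some(height);
        self.confirmations += 1;
    }
}
```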
/// A trait which should be implemented to provide feerate information on a number of time
/// horizons.
+///
/// Note that all of the functions implemented here *must* be reentrant-safe (obviously - they're
/// called from inside the library in response to ChainListener events, P2P events, or timer
/// events).
pub trait FeeEstimator: Sync + Send {
- /// Gets estimated satoshis of fee required per 1000 Weight-Units. This translates to:
- /// * satoshis-per-byte * 250
- /// * ceil(satoshis-per-kbyte / 4)
+ /// Gets estimated satoshis of fee required per 1000 Weight-Units.
+ ///
/// Must be no smaller than 253 (ie 1 satoshi-per-byte rounded up to ensure later round-downs
/// don't put us below 1 satoshi-per-byte).
+ ///
+ /// This translates to:
+ /// * satoshis-per-byte * 250
+ /// * ceil(satoshis-per-kbyte / 4)
fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u64;
}
}
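To make the conversion concrete, a fixed-rate estimator might be implemented like this (a minimal sketch, assuming this module's `FeeEstimator` and `ConfirmationTarget` are in scope; a real implementation would vary its answer by `confirmation_target`):

```rust
use std::cmp;

/// Hypothetical estimator returning a fixed, configured rate.
pub struct StaticFeeEstimator {
    pub sat_per_byte: u64,
}

impl FeeEstimator for StaticFeeEstimator {
    fn get_est_sat_per_1000_weight(&self, _confirmation_target: ConfirmationTarget) -> u64 {
        // satoshis-per-byte * 250 == satoshis-per-1000-weight-units,
        // clamped to the 253 minimum described above.
        cmp::max(self.sat_per_byte * 250, 253)
    }
}
```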
/// Utility to capture some common parts of ChainWatchInterface implementors.
+///
/// Keeping a local copy of this in a ChainWatchInterface implementor is likely useful.
pub struct ChainWatchInterfaceUtil {
network: Network,
}
/// Notify listeners that a block was connected given a full, unfiltered block.
+ ///
/// Handles re-scanning the block and calling block_connected again if listeners register new
/// watch data during the callbacks for you (see ChainListener::block_connected for more info).
pub fn block_connected_with_filtering(&self, block: &Block, height: u32) {
/// Notify listeners that a block was connected, given pre-filtered list of transactions in the
/// block which matched the filter (probably using does_match_tx).
+ ///
/// Returns true if notified listeners registered additional watch data (implying that the
/// block must be re-scanned and this function called again prior to further block_connected
/// calls, see ChainListener::block_connected for more info).
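Put together, the re-scan contract implies a driver loop like the following sketch (`block_connected_checked` stands in for the filtered-notify function documented above; its name and the `does_match_tx` signature are assumed):

```rust
use bitcoin::blockdata::block::Block;

// Keep re-filtering and re-notifying with the same header until no listener
// registers additional watch data.
fn connect_block(util: &ChainWatchInterfaceUtil, block: &Block, height: u32) {
    loop {
        let mut matched = Vec::new();
        let mut matched_indexes = Vec::new();
        for (index, tx) in block.txdata.iter().enumerate() {
            if util.does_match_tx(tx) {
                matched.push(tx);
                matched_indexes.push(index as u32);
            }
        }
        if !util.block_connected_checked(&block.header, height, &matched[..], &matched_indexes[..]) {
            break; // no new watch data was registered during the callbacks
        }
    }
}
```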
-//! Module provides structs and traits which allow other parts of rust-lightning to interact with
-//! the blockchain.
+//! Structs and traits which allow other parts of rust-lightning to interact with the blockchain.
pub mod chaininterface;
pub mod transaction;
use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
/// A reference to a transaction output.
+///
/// Differs from bitcoin::blockdata::transaction::OutPoint as the index is a u16 instead of u32
/// due to LN's restrictions on index values. Should reduce (possibly) unsafe conversions this way.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash)]
//! The top-level channel management and payment tracking stuff lives here.
+//!
//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
//! upon reconnect to the relevant peer(s).
+//!
//! It does not manage routing logic (see ln::router for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
+///
/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
/// to individual Channels.
pub struct ChannelManager {
}
impl ChannelManager {
- /// Constructs a new ChannelManager to hold several channels and route between them. This is
- /// the main "logic hub" for all channel-related actions, and implements ChannelMessageHandler.
+ /// Constructs a new ChannelManager to hold several channels and route between them.
+ ///
+ /// This is the main "logic hub" for all channel-related actions, and implements
+ /// ChannelMessageHandler.
+ ///
/// fee_proportional_millionths is an optional fee to charge on payments routed through us.
/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
+ ///
+ /// Panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
pub fn new(our_network_key: SecretKey, fee_proportional_millionths: u32, announce_channels_publicly: bool, network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>) -> Result<Arc<ChannelManager>, secp256k1::Error> {
let secp_ctx = Secp256k1::new();
}
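Wiring the constructor up might look like the following sketch, where every `Arc` is your own implementation of the corresponding trait (import paths assumed; 10_000 parts-per-million charges a 1% proportional fee):

```rust
use std::sync::Arc;
use bitcoin::network::constants::Network;
use secp256k1::key::SecretKey;

fn build_manager(our_network_key: SecretKey,
                 feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>,
                 chain_monitor: Arc<ChainWatchInterface>,
                 tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>)
        -> Result<Arc<ChannelManager>, secp256k1::Error> {
    ChannelManager::new(our_network_key, 10_000, true, Network::Testnet,
                        feeest, monitor, chain_monitor, tx_broadcaster, logger)
}
```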
/// Creates a new outbound channel to the given remote node and with the given value.
+ ///
/// user_id will be provided back as user_channel_id in FundingGenerationReady and
/// FundingBroadcastSafe events to allow tracking of which events correspond with which
/// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
/// may wish to avoid using 0 for user_id here.
+ ///
/// If successful, will generate a SendOpenChannel event, so you should probably poll
/// PeerManager::process_events afterwards.
+ ///
+ /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
+ /// greater than channel_value_satoshis * 1k
pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
let chan_keys = if cfg!(feature = "fuzztarget") {
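A usage sketch tying the pieces above together (values illustrative):

```rust
use secp256k1::key::PublicKey;

fn open_channel(manager: &ChannelManager, their_network_key: PublicKey) -> Result<(), APIError> {
    // 100_000 sat channel, 0 msat pushed; user_id 42 lets us match the later
    // FundingGenerationReady event back to this call (avoid 0, the inbound default).
    manager.create_channel(their_network_key, 100_000, 0, 42)
    // On success a SendOpenChannel event is queued; poll
    // PeerManager::process_events to actually send it.
}
```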
/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
/// will be accepted on the given channel, and after additional timeout/the closing of all
/// pending HTLCs, the channel will be closed on chain.
+ ///
/// May generate a SendShutdown event on success, which should be relayed.
pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), HandleError> {
let (mut res, node_id, chan_option) = {
}
/// Sends a payment along a given route.
+ ///
/// Value parameters are provided via the last hop in route, see documentation for RouteHop
/// fields for more info.
+ ///
/// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
/// payment), we don't do anything to stop you! We always try to ensure that if the provided
/// next hop knows the preimage to payment_hash they can claim an additional amount as
/// specified in the last hop in the route! Thus, you should probably do your own
/// payment_preimage tracking (which you should already be doing as they represent "proof of
/// payment") and prevent double-sends yourself.
- /// See-also docs on Channel::send_htlc_and_commit.
+ ///
/// May generate a SendHTLCs event on success, which should be relayed.
+ ///
/// Raises APIError::RouteError when an invalid route or forwarding parameter
/// (cltv_delta, fee, node public key) is specified
pub fn send_payment(&self, route: Route, payment_hash: [u8; 32]) -> Result<(), APIError> {
}
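Since `send_payment` does no duplicate-suppression itself, a caller might guard against double-sends along these lines (a sketch; the `HashSet` bookkeeping is the caller's own):

```rust
use std::collections::HashSet;

fn pay_once(manager: &ChannelManager, route: Route, payment_hash: [u8; 32],
            already_sent: &mut HashSet<[u8; 32]>) -> Result<(), APIError> {
    // send_payment will happily pay the same hash twice; we must not.
    if !already_sent.insert(payment_hash) {
        return Ok(()); // already attempted this payment_hash
    }
    manager.send_payment(route, payment_hash)
}
```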
/// Call this upon creation of a funding transaction for the given channel.
+ ///
/// Panics if a funding transaction has already been provided for this channel.
+ ///
/// May panic if the funding_txo is duplicative with some other channel (note that this should
/// be trivially prevented by using unique funding transaction keys per-channel).
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
}
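In practice this is called from your FundingGenerationReady handling, roughly as in this sketch (event field names assumed; `build_funding_tx` is a hypothetical wallet helper):

```rust
fn handle_funding_ready(manager: &ChannelManager, event: Event) {
    match event {
        Event::FundingGenerationReady { temporary_channel_id, .. } => {
            // Hypothetical wallet code paying the requested output script:
            let funding_txo: OutPoint = build_funding_tx();
            manager.funding_transaction_generated(&temporary_channel_id, funding_txo);
        },
        _ => {},
    }
}
```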
/// Processes HTLCs which are pending waiting on random forward delay.
+ ///
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
/// Will likely generate further events.
pub fn process_pending_htlc_forwards(&self) {
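A sketch of the intended call pattern (wait out the event's random delay, then forward):

```rust
fn handle_forwardable(manager: &ChannelManager, event: Event) {
    match event {
        // Schedule this after the random delay the event asks for, then:
        Event::PendingHTLCsForwardable { .. } => manager.process_pending_htlc_forwards(),
        _ => {},
    }
}
```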
/// Provides a payment preimage in response to a PaymentReceived event, returning true and
/// generating message events for the net layer to claim the payment, if possible. Thus, you
/// should probably kick the net layer to go send messages if this returns true!
+ ///
/// May panic if called except in response to a PaymentReceived event.
pub fn claim_funds(&self, payment_preimage: [u8; 32]) -> bool {
let mut sha = Sha256::new();
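A sketch of the intended use from PaymentReceived handling:

```rust
fn on_payment_received(manager: &ChannelManager, payment_preimage: [u8; 32]) {
    // Only valid in response to a PaymentReceived event whose hash matches
    // this preimage.
    if manager.claim_funds(payment_preimage) {
        // Message events were generated; kick the net layer
        // (eg PeerManager::process_events) to send them.
    }
}
```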
//! The logic to monitor for on-chain transactions and create the relevant claim responses lives
//! here.
+//!
//! ChannelMonitor objects are generated by ChannelManager in response to relevant
//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can
//! be made in responding to certain messages, see ManyChannelMonitor for more.
+//!
//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other
pub enum ChannelMonitorUpdateErr {
/// Used to indicate a temporary failure (eg connection to a watchtower failed, but is expected
/// to succeed at some point in the future).
+ ///
/// Such a failure will "freeze" a channel, preventing us from revoking old states or
/// submitting new commitment transactions to the remote party.
/// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore
/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
/// events to it, while also taking any add_update_monitor events and passing them to some remote
/// server(s).
+///
/// Note that any updates to a channel's monitor *must* be applied to each instance of the
/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
/// which we have revoked, allowing our counterparty to claim all funds in the channel!
pub trait ManyChannelMonitor: Send + Sync {
/// Adds or updates a monitor for the given `funding_txo`.
+ ///
/// Implementor must also ensure that the funding_txo outpoint is registered with any relevant
/// ChainWatchInterfaces such that the provided monitor receives block_connected callbacks with
/// any spends of it.
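A replicating implementation might look like the following sketch (`WatchtowerClient` and its `replicate` method are hypothetical; the method signature is assumed from the surrounding docs):

```rust
struct ReplicatingMonitor {
    local: SimpleManyChannelMonitor<OutPoint>,
    watchtower: WatchtowerClient, // hypothetical remote-replication client
}

impl ManyChannelMonitor for ReplicatingMonitor {
    fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor)
            -> Result<(), ChannelMonitorUpdateErr> {
        // Replicate remotely *before* reporting success, per the warning above.
        if self.watchtower.replicate(&funding_txo, &monitor).is_err() {
            // Freezes the channel until ChannelManager::test_restore_channel_monitor
            // retries the update.
            return Err(ChannelMonitorUpdateErr::TemporaryFailure);
        }
        self.local.add_update_monitor(funding_txo, monitor)
    }
}
```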
/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
/// watchtower or watch our own channels.
+///
/// Note that you must provide your own key by which to refer to channels.
+///
/// If you're accepting remote monitors (ie are implementing a watchtower), you must verify that
/// users cannot overwrite a given channel by providing a duplicate key. ie you should probably
/// index by a PublicKey which is required to sign any updates.
+///
/// If you're using this for local monitoring of your own channels, you probably want to use
/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
pub struct SimpleManyChannelMonitor<Key> {
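The two keying schemes described above, as type aliases (sketch):

```rust
use secp256k1::key::PublicKey;

// Local monitoring: key by funding outpoint (gives a ManyChannelMonitor impl).
type LocalMonitor = SimpleManyChannelMonitor<OutPoint>;
// Watchtower: key by a client PublicKey required to sign updates, so one
// client cannot overwrite another's channel state.
type WatchtowerMonitor = SimpleManyChannelMonitor<PublicKey>;
```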
/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
/// on-chain transactions to ensure no loss of funds occurs.
+///
/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
/// information and are actively monitoring the chain.
pub struct ChannelMonitor {
//! High level lightning structs and impls live here.
+//!
//! You probably want to create a channelmanager::ChannelManager, and a router::Router first.
//! Then, you probably want to pass them both on to a peer_handler::PeerManager and use that to
//! create/manage connections and call get_and_clear_pending_events after each action, handling
//! them appropriately.
+//!
//! When you want to open/close a channel or send a payment, call into your ChannelManager and when
//! you want to learn things about the network topology (eg get a route for sending a payment),
//! call into your Router.
//! Wire messages, traits representing wire message handlers, and a few error types live here.
+//!
//! For a normal node you probably don't need to use anything here, however, if you wish to split a
//! node into an internet-facing route/message socket handling daemon and a separate daemon (or
//! server entirely) which handles only channel-related messages you may wish to implement
//! ChannelMessageHandler yourself and use it to re-serialize messages and pass them across
//! daemons/servers.
+//!
//! Note that if you go with such an architecture (instead of passing raw socket events to a
//! non-internet-facing system) you trust the frontend internet-facing system to not lie about the
//! source node_id of the message, however this does allow you to significantly reduce bandwidth
},
}
-/// A trait to describe an object which can receive channel messages. Messages MAY be called in
-/// parallel when they originate from different their_node_ids, however they MUST NOT be called in
-/// parallel when the two calls have the same their_node_id.
+/// A trait to describe an object which can receive channel messages.
+///
+/// Messages MAY be called in parallel when they originate from different their_node_ids, however
+/// they MUST NOT be called in parallel when the two calls have the same their_node_id.
pub trait ChannelMessageHandler : events::EventsProvider + Send + Sync {
//Channel init:
/// Handle an incoming open_channel message from the given peer.
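A caller could satisfy the concurrency contract with per-peer locks, roughly as follows (a sketch; nothing here is part of the trait itself):

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use secp256k1::key::PublicKey;

// Hold lock_for(&their_node_id).lock() around each handler call: calls for
// different peers run in parallel, same-peer calls are serialized.
struct PerPeerLocks {
    locks: Mutex<HashMap<PublicKey, Arc<Mutex<()>>>>,
}

impl PerPeerLocks {
    fn lock_for(&self, their_node_id: &PublicKey) -> Arc<Mutex<()>> {
        self.locks.lock().unwrap().entry(their_node_id.clone())
            .or_insert_with(|| Arc::new(Mutex::new(()))).clone()
    }
}
```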
//! Top level peer message handling and socket handling logic lives here.
+//!
//! Instead of actually servicing sockets ourselves we require that you implement the
//! SocketDescriptor interface and use that to receive actions which you should perform on the
//! socket, and call into PeerManager with bytes read from the socket. The PeerManager will then
/// Provides an object which can be used to send data to and which uniquely identifies a connection
/// to a remote host. You will need to be able to generate multiple of these which meet Eq and
/// implement Hash to meet the PeerManager API.
+///
/// For efficiency, Clone should be relatively cheap for this type.
+///
/// You probably want to just extend an int and put a file descriptor in a struct and implement
/// send_data. Note that if you are using a higher-level net library that may close() itself, be
/// careful to ensure you don't have races whereby you might register a new connection with an fd
/// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected.
/// Note that in the disconnected case, a disconnect_event must still fire and further write
/// attempts may occur until that time.
+ ///
/// If the returned size is smaller than data.len() - write_offset, a write_available event must
/// trigger the next time more data can be written. Additionally, until a send_data event
/// completes fully, no further read_events should trigger on the same peer!
+ ///
/// If a read_event on this descriptor had previously returned true (indicating that read
/// events should be paused to prevent DoS in the send buffer), resume_read may be set
/// indicating that read events on this descriptor should resume. A resume_read of false does
}
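The "extend an int" suggestion might look like this sketch (identity portion only; `send_data` is left to your socket code, and the unix-only `RawFd` is illustrative):

```rust
use std::os::unix::io::RawFd;

// Identity comes from a unique connection id, not the fd alone, so an fd the
// OS reuses for a new connection still compares unequal to the old one.
#[derive(Clone, PartialEq, Eq, Hash)]
struct FdSocketDescriptor {
    id: u64,    // unique per accepted connection
    fd: RawFd,  // written to by your send_data implementation
}
```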
/// Get the list of node ids for peers which have completed the initial handshake.
+ ///
/// For outbound connections, this will be the same as the their_node_id parameter passed in to
/// new_outbound_connection, however entries will only appear once the initial handshake has
/// completed and we are sure the remote peer has the private key for the given node_id.
/// Indicates a new outbound connection has been established to a node with the given node_id.
/// Note that if an Err is returned here you MUST NOT call disconnect_event for the new
/// descriptor but must disconnect the connection immediately.
+ ///
/// Returns some bytes to send to the remote node.
+ ///
/// Panics if descriptor is duplicative with some other descriptor which has not yet had a
/// disconnect_event.
pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result<Vec<u8>, PeerHandleError> {
}
/// Indicates a new inbound connection has been established.
+ ///
/// May refuse the connection by returning an Err, but will never write bytes to the remote end
/// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
/// call disconnect_event for the new descriptor but must disconnect the connection
/// immediately.
+ ///
/// Panics if descriptor is duplicative with some other descriptor which has not yet had a
/// disconnect_event.
pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> {
}
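Connection setup, as a sketch (`queue_write` is your own hypothetical socket helper; on Err, close the socket yourself but do not call disconnect_event):

```rust
fn setup_outbound<D: SocketDescriptor + Clone>(peer_manager: &PeerManager<D>,
        their_node_id: PublicKey, desc: D) -> Result<(), PeerHandleError> {
    // The outbound connector speaks first: write these bytes before reading.
    let initial_bytes = peer_manager.new_outbound_connection(their_node_id, desc.clone())?;
    queue_write(&desc, initial_bytes); // hypothetical socket helper
    Ok(())
}

fn setup_inbound<D: SocketDescriptor>(peer_manager: &PeerManager<D>, desc: D)
        -> Result<(), PeerHandleError> {
    // Never writes first; on Ok, start feeding reads into read_event.
    peer_manager.new_inbound_connection(desc)
}
```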
/// Indicates that there is room to write data to the given socket descriptor.
+ ///
/// May return an Err to indicate that the connection should be closed.
+ ///
/// Will most likely call send_data on the descriptor passed in (or the descriptor handed into
- /// new_*_connection) before returning. Thus, be very careful with reentrancy issues! The
+ /// new_\*_connection) before returning. Thus, be very careful with reentrancy issues! The
/// invariants around calling write_event in case a write did not fully complete must still
/// hold - be ready to call write_event again if a write call generated here isn't sufficient!
- /// Panics if the descriptor was not previously registered in a new_*_connection event.
+ /// Panics if the descriptor was not previously registered in a new_\*_connection event.
pub fn write_event(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
let mut peers = self.peers.lock().unwrap();
match peers.peers.get_mut(descriptor) {
}
/// Indicates that data was read from the given socket descriptor.
+ ///
/// May return an Err to indicate that the connection should be closed.
+ ///
/// Will very likely call send_data on the descriptor passed in (or a descriptor handed into
/// new_*_connection) before returning. Thus, be very careful with reentrancy issues! The
/// invariants around calling write_event in case a write did not fully complete must still
/// hold. Note that this function will often call send_data on many peers before returning, not
/// just this peer!
+ ///
/// If Ok(true) is returned, further read_events should not be triggered until a write_event on
/// this file descriptor has resume_read set (preventing DoS issues in the send buffer). Note
/// that this must be true even if a send_data call with resume_read=true was made during the
/// course of this function!
+ ///
/// Panics if the descriptor was not previously registered in a new_*_connection event.
pub fn read_event(&self, peer_descriptor: &mut Descriptor, data: Vec<u8>) -> Result<bool, PeerHandleError> {
match self.do_read_event(peer_descriptor, data) {
}
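Driving read_event from your socket loop might look like this sketch (`pause_reads`/`close_socket` are your own hypothetical socket helpers):

```rust
fn on_readable<D: SocketDescriptor>(peer_manager: &PeerManager<D>,
                                    desc: &mut D, data: Vec<u8>) {
    match peer_manager.read_event(desc, data) {
        // Ok(true): apply backpressure; no more read_events until a send_data
        // or write_event sets resume_read for this descriptor.
        Ok(pause) => if pause { pause_reads(desc); },
        Err(_) => {
            close_socket(desc);
            // Required even though the error came from read_event itself:
            peer_manager.disconnect_event(desc);
        },
    }
}
```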
/// Indicates that the given socket descriptor's connection is now closed.
+ ///
/// This must be called even if a PeerHandleError was given for a read_event or write_event,
- /// but must NOT be called if a PeerHandleError was provided out of a new_*_connection event!
+ /// but must NOT be called if a PeerHandleError was provided out of a new_\*_connection event!
+ ///
/// Panics if the descriptor was not previously registered in a successful new_*_connection event.
pub fn disconnect_event(&self, descriptor: &Descriptor) {
self.disconnect_event_internal(descriptor, false);
//! The top-level routing/network map tracking logic lives here.
+//!
//! You probably want to create a Router and use that as your RoutingMessageHandler and then
//! interrogate it to get routes for your own payments.
}
/// Gets a route from us to the given target node.
+ ///
/// Extra routing hops between known nodes and the target will be used if they are included in
/// last_hops.
+ ///
/// If some channels aren't announced, it may be useful to fill in a first_hops with the
/// results from a local ChannelManager::list_usable_channels() call. If it is filled in, our
/// (this Router's) view of our local channels will be ignored, and only those in first_hops
- /// will be used. Panics if first_hops contains channels without short_channel_ids
+ /// will be used.
+ ///
+ /// Panics if first_hops contains channels without short_channel_ids
/// (ChannelManager::list_usable_channels will never include such channels).
+ ///
/// The fees on channels from us to next-hops are ignored (as they are assumed to all be
/// equal), however the enabled/disabled bit on such channels as well as the htlc_minimum_msat
/// *is* checked as they may change based on the receiving node.
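Usage might look like this sketch (get_route's exact signature and argument order are assumed here; adjust to the real one):

```rust
use secp256k1::key::PublicKey;

fn route_and_pay(router: &Router, manager: &ChannelManager, target: PublicKey,
                 amt_msat: u64, final_cltv: u32, payment_hash: [u8; 32]) {
    // Use our usable channels as first hops; the Router's view of our local
    // channels is then ignored, per the docs above.
    let first_hops = manager.list_usable_channels();
    let last_hops = Vec::new(); // invoice routing hints would go here
    if let Ok(route) = router.get_route(&target, Some(&first_hops), &last_hops,
                                        amt_msat, final_cltv) {
        let _ = manager.send_payment(route, payment_hash);
    }
}
```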
//! Events are returned from various bits in the library which indicate some action must be taken
//! by the client.
+//!
//! Because we don't have a built-in runtime, it's up to the client to call events at a time in the
//! future, as well as generate and broadcast funding transactions, handle payment preimages, and a
//! few other things.
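The resulting client loop, sketched (assumes the EventsProvider trait providing get_and_clear_pending_events is in scope):

```rust
fn drain_events(manager: &ChannelManager) {
    for event in manager.get_and_clear_pending_events() {
        match event {
            Event::FundingGenerationReady { .. } => { /* build + broadcast the funding tx */ },
            Event::PendingHTLCsForwardable { .. } => manager.process_pending_htlc_forwards(),
            // SendOpenChannel and friends are handled by PeerManager::process_events.
            _ => {},
        }
    }
}
```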
// TODO: Move these into a separate struct and make a top-level enum
/// Used to indicate that we've initiated a channel open and should send the open_channel
/// message provided to the given peer.
+ ///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
SendOpenChannel {
/// The node_id of the node which should receive this message
msg: msgs::OpenChannel,
},
/// Used to indicate that a funding_created message should be sent to the peer with the given node_id.
+ ///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
SendFundingCreated {
/// The node_id of the node which should receive this message
msg: msgs::FundingCreated,
},
/// Used to indicate that a funding_locked message should be sent to the peer with the given node_id.
+ ///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
SendFundingLocked {
/// The node_id of the node which should receive these message(s)
},
/// Used to indicate that a series of HTLC update messages, as well as a commitment_signed
/// message should be sent to the peer with the given node_id.
+ ///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
UpdateHTLCs {
/// The node_id of the node which should receive these message(s)
updates: msgs::CommitmentUpdate,
},
/// Used to indicate that a shutdown message should be sent to the peer with the given node_id.
+ ///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
SendShutdown {
/// The node_id of the node which should receive this message
},
/// Used to indicate that a channel_announcement and channel_update should be broadcast to all
/// peers (except the peer with node_id either msg.contents.node_id_1 or msg.contents.node_id_2).
+ ///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
BroadcastChannelAnnouncement {
/// The channel_announcement which should be sent.
update_msg: msgs::ChannelUpdate,
},
/// Used to indicate that a channel_update should be broadcast to all peers.
+ ///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
BroadcastChannelUpdate {
/// The channel_update which should be sent.
//Error handling
/// Broadcast an error downstream to be handled
+ ///
/// This event is handled by PeerManager::process_events if you are using a PeerManager.
HandleError {
/// The node_id of the node which should receive this message