use bitcoin_bech32::WitnessProgram;
use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient};
use lightning::chain::{chainmonitor, ChannelMonitorUpdateStatus};
use lightning::chain::{Filter, Watch};
use lightning::ln::channelmanager;
use lightning::ln::channelmanager::{
	ChainParameters, ChannelManagerReadArgs, SimpleArcChannelManager,
};
use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager};
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::onion_message::SimpleArcOnionMessenger;
use lightning::routing::gossip;
use lightning::routing::gossip::{NodeId, P2PGossipSync};
use lightning::routing::scoring::ProbabilisticScorer;
use lightning::util::config::UserConfig;
use lightning::util::events::{Event, PaymentPurpose};
use lightning::util::ser::ReadableArgs;
use lightning_background_processor::{BackgroundProcessor, GossipSync};
use lightning_block_sync::init;
use lightning_block_sync::poll;
use lightning_block_sync::SpvClient;
use rand::{thread_rng, Rng};
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::convert::TryInto;
use std::fmt;
use std::fs;
use std::fs::File;
use std::io;
use std::io::Write;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, SystemTime};
pub(crate) type ChannelManager =
SimpleArcChannelManager<ChainMonitor, BitcoindClient, BitcoindClient, FilesystemLogger>;
-pub(crate) type InvoicePayer<E> = payment::InvoicePayer<
- Arc<ChannelManager>,
- Router,
- Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph>>>>,
+pub(crate) type InvoicePayer<E> =
+ payment::InvoicePayer<Arc<ChannelManager>, Router, Arc<FilesystemLogger>, E>;
+
+type Router = DefaultRouter<
+ Arc<NetworkGraph>,
Arc<FilesystemLogger>,
- E,
+ Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph>, Arc<FilesystemLogger>>>>,
>;
-type Router = DefaultRouter<Arc<NetworkGraph>, Arc<FilesystemLogger>>;
-
-struct DataPersister {
- data_dir: String,
-}
-
-impl
- Persister<
- InMemorySigner,
- Arc<ChainMonitor>,
- Arc<BitcoindClient>,
- Arc<KeysManager>,
- Arc<BitcoindClient>,
- Arc<FilesystemLogger>,
- > for DataPersister
-{
- fn persist_manager(&self, channel_manager: &ChannelManager) -> Result<(), std::io::Error> {
- FilesystemPersister::persist_manager(self.data_dir.clone(), channel_manager)
- }
+pub(crate) type NetworkGraph = gossip::NetworkGraph<Arc<FilesystemLogger>>;
- fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), std::io::Error> {
- if FilesystemPersister::persist_network_graph(self.data_dir.clone(), network_graph).is_err()
- {
- // Persistence errors here are non-fatal as we can just fetch the routing graph
- // again later, but they may indicate a disk error which could be fatal elsewhere.
- eprintln!("Warning: Failed to persist network graph, check your disk and permissions");
- }
-
- Ok(())
- }
-}
+type OnionMessenger = SimpleArcOnionMessenger<FilesystemLogger>;
// Reacts to LDK `Event`s: funds channels, claims/records payments, logs
// forwards, etc. Takes everything by reference so the caller's `Arc`s are not
// cloned per event.
async fn handle_ldk_events(
	channel_manager: &Arc<ChannelManager>, bitcoind_client: &BitcoindClient,
	network_graph: &NetworkGraph, keys_manager: &KeysManager,
	inbound_payments: &PaymentInfoStorage, outbound_payments: &PaymentInfoStorage,
	network: Network, event: &Event,
) {
	match event {
		Event::FundingGenerationReady {
			temporary_channel_id,
			counterparty_node_id,
			channel_value_satoshis,
			output_script,
			..
encode::deserialize(&hex_utils::to_vec(&signed_tx.hex).unwrap()).unwrap();
// Give the funding transaction back to LDK for opening the channel.
if channel_manager
	// Hand the funding tx to LDK; the counterparty's node id is now a
	// required disambiguator alongside the temporary channel id.
	.funding_transaction_generated(
		&temporary_channel_id,
		counterparty_node_id,
		final_tx,
	)
	.is_err()
{
println!(
io::stdout().flush().unwrap();
}
}
- Event::PaymentReceived { payment_hash, purpose, amt, .. } => {
- let mut payments = inbound_payments.lock().unwrap();
+ Event::PaymentReceived { payment_hash, purpose, amount_msat } => {
+ println!(
+ "\nEVENT: received payment from payment hash {} of {} millisatoshis",
+ hex_utils::hex_str(&payment_hash.0),
+ amount_msat,
+ );
+ print!("> ");
+ io::stdout().flush().unwrap();
+ let payment_preimage = match purpose {
+ PaymentPurpose::InvoicePayment { payment_preimage, .. } => *payment_preimage,
+ PaymentPurpose::SpontaneousPayment(preimage) => Some(*preimage),
+ };
+ channel_manager.claim_funds(payment_preimage.unwrap());
+ }
+ Event::PaymentClaimed { payment_hash, purpose, amount_msat } => {
+ println!(
+ "\nEVENT: claimed payment from payment hash {} of {} millisatoshis",
+ hex_utils::hex_str(&payment_hash.0),
+ amount_msat,
+ );
+ print!("> ");
+ io::stdout().flush().unwrap();
let (payment_preimage, payment_secret) = match purpose {
PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => {
(*payment_preimage, Some(*payment_secret))
}
PaymentPurpose::SpontaneousPayment(preimage) => (Some(*preimage), None),
};
- let status = match channel_manager.claim_funds(payment_preimage.unwrap()) {
- true => {
- println!(
- "\nEVENT: received payment from payment hash {} of {} millisatoshis",
- hex_utils::hex_str(&payment_hash.0),
- amt
- );
- print!("> ");
- io::stdout().flush().unwrap();
- HTLCStatus::Succeeded
- }
- _ => HTLCStatus::Failed,
- };
+ let mut payments = inbound_payments.lock().unwrap();
match payments.entry(*payment_hash) {
Entry::Occupied(mut e) => {
let payment = e.get_mut();
- payment.status = status;
+ payment.status = HTLCStatus::Succeeded;
payment.preimage = payment_preimage;
payment.secret = payment_secret;
}
e.insert(PaymentInfo {
preimage: payment_preimage,
secret: payment_secret,
- status,
- amt_msat: MillisatAmount(Some(*amt)),
+ status: HTLCStatus::Succeeded,
+ amt_msat: MillisatAmount(Some(*amount_msat)),
});
}
}
}
Event::PaymentPathSuccessful { .. } => {}
Event::PaymentPathFailed { .. } => {}
+ Event::ProbeSuccessful { .. } => {}
+ Event::ProbeFailed { .. } => {}
Event::PaymentFailed { payment_hash, .. } => {
print!(
"\nEVENT: Failed to send payment to payment hash {:?}: exhausted payment retry attempts",
payment.status = HTLCStatus::Failed;
}
}
- Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => {
+ Event::PaymentForwarded {
+ prev_channel_id,
+ next_channel_id,
+ fee_earned_msat,
+ claim_from_onchain_tx,
+ } => {
+ let read_only_network_graph = network_graph.read_only();
+ let nodes = read_only_network_graph.nodes();
+ let channels = channel_manager.list_channels();
+
+ let node_str = |channel_id: &Option<[u8; 32]>| match channel_id {
+ None => String::new(),
+ Some(channel_id) => match channels.iter().find(|c| c.channel_id == *channel_id) {
+ None => String::new(),
+ Some(channel) => {
+ match nodes.get(&NodeId::from_pubkey(&channel.counterparty.node_id)) {
+ None => "private node".to_string(),
+ Some(node) => match &node.announcement_info {
+ None => "unnamed node".to_string(),
+ Some(announcement) => {
+ format!("node {}", announcement.alias)
+ }
+ },
+ }
+ }
+ },
+ };
+ let channel_str = |channel_id: &Option<[u8; 32]>| {
+ channel_id
+ .map(|channel_id| format!(" with channel {}", hex_utils::hex_str(&channel_id)))
+ .unwrap_or_default()
+ };
+ let from_prev_str =
+ format!(" from {}{}", node_str(prev_channel_id), channel_str(prev_channel_id));
+ let to_next_str =
+ format!(" to {}{}", node_str(next_channel_id), channel_str(next_channel_id));
+
let from_onchain_str = if *claim_from_onchain_tx {
"from onchain downstream claim"
} else {
};
if let Some(fee_earned) = fee_earned_msat {
println!(
- "\nEVENT: Forwarded payment, earning {} msat {}",
- fee_earned, from_onchain_str
+ "\nEVENT: Forwarded payment{}{}, earning {} msat {}",
+ from_prev_str, to_next_str, fee_earned, from_onchain_str
);
} else {
- println!("\nEVENT: Forwarded payment, claiming onchain {}", from_onchain_str);
+ println!(
+ "\nEVENT: Forwarded payment{}{}, claiming onchain {}",
+ from_prev_str, to_next_str, from_onchain_str
+ );
}
print!("> ");
io::stdout().flush().unwrap();
}
+ Event::HTLCHandlingFailed { .. } => {}
Event::PendingHTLCsForwardable { time_forwardable } => {
let forwarding_channel_manager = channel_manager.clone();
let min = time_forwardable.as_millis() as u64;
// Step 7: Read ChannelMonitor state from disk
let mut channelmonitors = persister.read_channelmonitors(keys_manager.clone()).unwrap();
- // Step 8: Initialize the ChannelManager
+ // Step 8: Poll for the best chain tip, which may be used by the channel manager & spv client
+ let polled_chain_tip = init::validate_best_block_header(bitcoind_client.as_ref())
+ .await
+ .expect("Failed to fetch best block header and best block");
+
+ // Step 9: Initialize the ChannelManager
let mut user_config = UserConfig::default();
- user_config.peer_channel_config_limits.force_announced_channel_preference = false;
+ user_config.channel_handshake_limits.force_announced_channel_preference = false;
let mut restarting_node = true;
let (channel_manager_blockhash, channel_manager) = {
if let Ok(mut f) = fs::File::open(format!("{}/manager", ldk_data_dir.clone())) {
} else {
// We're starting a fresh node.
restarting_node = false;
- let getinfo_resp = bitcoind_client.get_blockchain_info().await;
-
- let chain_params = ChainParameters {
- network: args.network,
- best_block: BestBlock::new(
- getinfo_resp.latest_blockhash,
- getinfo_resp.latest_height as u32,
- ),
- };
+
+ let polled_best_block = polled_chain_tip.to_best_block();
+ let polled_best_block_hash = polled_best_block.block_hash();
+ let chain_params =
+ ChainParameters { network: args.network, best_block: polled_best_block };
let fresh_channel_manager = channelmanager::ChannelManager::new(
fee_estimator.clone(),
chain_monitor.clone(),
user_config,
chain_params,
);
- (getinfo_resp.latest_blockhash, fresh_channel_manager)
+ (polled_best_block_hash, fresh_channel_manager)
}
};
// Step 10: Sync ChannelMonitors and ChannelManager to chain tip
let mut chain_listener_channel_monitors = Vec::new();
let mut cache = UnboundedCache::new();
let mut chain_tip: Option<poll::ValidatedBlockHeader> = None;
if restarting_node {
	// Listeners now need `Send + Sync` so they can be replayed concurrently.
	let mut chain_listeners = vec![(
		channel_manager_blockhash,
		&channel_manager as &(dyn chain::Listen + Send + Sync),
	)];
	for (blockhash, channel_monitor) in channelmonitors.drain(..) {
		let outpoint = channel_monitor.get_funding_txo().0;
	}
	for monitor_listener_info in chain_listener_channel_monitors.iter_mut() {
		chain_listeners.push((
			monitor_listener_info.0,
			&monitor_listener_info.1 as &(dyn chain::Listen + Send + Sync),
		));
	}
	chain_tip = Some(
		init::synchronize_listeners(
			bitcoind_client.as_ref(),
			args.network,
			&mut cache,
			chain_listeners,
	);
}
// Step 11: Give ChannelMonitors to ChainMonitor
for item in chain_listener_channel_monitors.drain(..) {
let channel_monitor = item.1 .0;
let funding_outpoint = item.2;
- chain_monitor.watch_channel(funding_outpoint, channel_monitor).unwrap();
+ assert_eq!(
+ chain_monitor.watch_channel(funding_outpoint, channel_monitor),
+ ChannelMonitorUpdateStatus::Completed
+ );
}
- // Step 11: Optional: Initialize the NetGraphMsgHandler
+ // Step 12: Optional: Initialize the P2PGossipSync
let genesis = genesis_block(args.network).header.block_hash();
let network_graph_path = format!("{}/network_graph", ldk_data_dir.clone());
- let network_graph = Arc::new(disk::read_network(Path::new(&network_graph_path), genesis));
- let network_gossip = Arc::new(NetGraphMsgHandler::new(
+ let network_graph =
+ Arc::new(disk::read_network(Path::new(&network_graph_path), genesis, logger.clone()));
+ let gossip_sync = Arc::new(P2PGossipSync::new(
Arc::clone(&network_graph),
None::<Arc<dyn chain::Access + Send + Sync>>,
logger.clone(),
));
- // Step 12: Initialize the PeerManager
+ // Step 13: Initialize the PeerManager
let channel_manager: Arc<ChannelManager> = Arc::new(channel_manager);
+ let onion_messenger: Arc<OnionMessenger> = Arc::new(OnionMessenger::new(
+ Arc::clone(&keys_manager),
+ Arc::clone(&logger),
+ IgnoringMessageHandler {},
+ ));
let mut ephemeral_bytes = [0; 32];
+ let current_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
rand::thread_rng().fill_bytes(&mut ephemeral_bytes);
let lightning_msg_handler = MessageHandler {
chan_handler: channel_manager.clone(),
- route_handler: network_gossip.clone(),
+ route_handler: gossip_sync.clone(),
+ onion_message_handler: onion_messenger.clone(),
};
let peer_manager: Arc<PeerManager> = Arc::new(PeerManager::new(
lightning_msg_handler,
keys_manager.get_node_secret(Recipient::Node).unwrap(),
+ current_time.try_into().unwrap(),
&ephemeral_bytes,
logger.clone(),
- Arc::new(IgnoringMessageHandler {}),
+ IgnoringMessageHandler {},
));
// ## Running LDK
- // Step 13: Initialize networking
+ // Step 14: Initialize networking
let peer_manager_connection_handler = peer_manager.clone();
let listening_port = args.ldk_peer_listening_port;
- let stop_listen = Arc::new(AtomicBool::new(false));
- let stop_listen_ref = Arc::clone(&stop_listen);
+ let stop_listen_connect = Arc::new(AtomicBool::new(false));
+ let stop_listen = Arc::clone(&stop_listen_connect);
tokio::spawn(async move {
let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", listening_port))
.await
loop {
let peer_mgr = peer_manager_connection_handler.clone();
let tcp_stream = listener.accept().await.unwrap().0;
if stop_listen.load(Ordering::Acquire) {
return;
}
tokio::spawn(async move {
}
});
// Step 15: Connect and Disconnect Blocks
if chain_tip.is_none() {
	// Reuse the tip we already validated in Step 8 instead of re-polling.
	chain_tip = Some(polled_chain_tip);
}
let channel_manager_listener = channel_manager.clone();
let chain_monitor_listener = chain_monitor.clone();
let bitcoind_block_source = bitcoind_client.clone();
let network = args.network;
tokio::spawn(async move {
- let mut derefed = bitcoind_block_source.deref();
- let chain_poller = poll::ChainPoller::new(&mut derefed, network);
+ let chain_poller = poll::ChainPoller::new(bitcoind_block_source.as_ref(), network);
let chain_listener = (chain_monitor_listener, channel_manager_listener);
let mut spv_client =
SpvClient::new(chain_tip.unwrap(), chain_poller, &mut cache, &chain_listener);
}
});
// Step 16: Handle LDK Events
let channel_manager_event_listener = channel_manager.clone();
let keys_manager_listener = keys_manager.clone();
// TODO: persist payment info to disk
let outbound_pmts_for_events = outbound_payments.clone();
let network = args.network;
let bitcoind_rpc = bitcoind_client.clone();
let network_graph_events = network_graph.clone();
let handle = tokio::runtime::Handle::current();
// Pass by reference: `handle_ldk_events` no longer takes owned `Arc` clones.
let event_handler = move |event: &Event| {
	handle.block_on(handle_ldk_events(
		&channel_manager_event_listener,
		&bitcoind_rpc,
		&network_graph_events,
		&keys_manager_listener,
		&inbound_pmts_for_events,
		&outbound_pmts_for_events,
network,
event,
));
};
// Step 17: Initialize routing ProbabilisticScorer
let scorer_path = format!("{}/scorer", ldk_data_dir.clone());
let scorer = Arc::new(Mutex::new(disk::read_scorer(
	Path::new(&scorer_path),
	Arc::clone(&network_graph),
	Arc::clone(&logger),
)));

// Step 18: Create InvoicePayer
// The scorer now lives inside the router; the manual 10-minute persistence
// task is gone — the BackgroundProcessor persists the scorer for us.
let router = DefaultRouter::new(
	network_graph.clone(),
	logger.clone(),
	keys_manager.get_secure_random_bytes(),
	scorer.clone(),
);
let invoice_payer = Arc::new(InvoicePayer::new(
	channel_manager.clone(),
	router,
	logger.clone(),
	event_handler,
	// Retry on a time budget rather than a fixed attempt count.
	payment::Retry::Timeout(Duration::from_secs(10)),
));

// Step 19: Persist ChannelManager and NetworkGraph
// FilesystemPersister replaces the hand-rolled DataPersister.
let persister = Arc::new(FilesystemPersister::new(ldk_data_dir.clone()));

// Step 20: Background Processing
let background_processor = BackgroundProcessor::start(
	persister,
	invoice_payer.clone(),
	chain_monitor.clone(),
	channel_manager.clone(),
	GossipSync::p2p(gossip_sync.clone()),
	peer_manager.clone(),
	logger.clone(),
	Some(scorer.clone()),
);
// Regularly reconnect to channel peers.
let connect_cm = Arc::clone(&channel_manager);
let connect_pm = Arc::clone(&peer_manager);
let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir.clone());
+ let stop_connect = Arc::clone(&stop_listen_connect);
tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(1));
loop {
.map(|chan| chan.counterparty.node_id)
.filter(|id| !peers.contains(id))
{
+ if stop_connect.load(Ordering::Acquire) {
+ return;
+ }
for (pubkey, peer_addr) in info.iter() {
if *pubkey == node_id {
let _ = cli::do_connect_peer(
// some public channels, and is only useful if we have public listen address(es) to announce.
// In a production environment, this should occur only after the announcement of new channels
// to avoid churn in the global network graph.
// Node announcements are now broadcast via the PeerManager, not the
// ChannelManager.
let peer_man = Arc::clone(&peer_manager);
let network = args.network;
if !args.ldk_announced_listen_addr.is_empty() {
	tokio::spawn(async move {
		let mut interval = tokio::time::interval(Duration::from_secs(60));
		loop {
			interval.tick().await;
			peer_man.broadcast_node_announcement(
[0; 3],
args.ldk_announced_node_name,
args.ldk_announced_listen_addr.clone(),
Arc::clone(&channel_manager),
Arc::clone(&keys_manager),
Arc::clone(&network_graph),
+ Arc::clone(&onion_messenger),
inbound_payments,
outbound_payments,
ldk_data_dir.clone(),
network,
+ Arc::clone(&logger),
)
.await;
// Disconnect our peers and stop accepting new connections. This ensures we don't continue
// updating our channel data after we've stopped the background processor.
- stop_listen.store(true, Ordering::Release);
+ stop_listen_connect.store(true, Ordering::Release);
peer_manager.disconnect_all_peers();
// Stop the background processor.
#[tokio::main]
pub async fn main() {
+ #[cfg(not(target_os = "windows"))]
+ {
+ // Catch Ctrl-C with a dummy signal handler.
+ unsafe {
+ let mut new_action: libc::sigaction = core::mem::zeroed();
+ let mut old_action: libc::sigaction = core::mem::zeroed();
+
+ extern "C" fn dummy_handler(
+ _: libc::c_int, _: *const libc::siginfo_t, _: *const libc::c_void,
+ ) {
+ }
+
+ new_action.sa_sigaction = dummy_handler as libc::sighandler_t;
+ new_action.sa_flags = libc::SA_SIGINFO;
+
+ libc::sigaction(
+ libc::SIGINT,
+ &new_action as *const libc::sigaction,
+ &mut old_action as *mut libc::sigaction,
+ );
+ }
+ }
+
start_ldk().await;
}