use lightning::chain::Watch;
use lightning::ln::channelmanager;
use lightning::ln::channelmanager::{
- ChainParameters, ChannelManagerReadArgs, PaymentHash, PaymentPreimage, PaymentSecret,
- SimpleArcChannelManager,
+ BestBlock, ChainParameters, ChannelManagerReadArgs, SimpleArcChannelManager,
};
use lightning::ln::peer_handler::{MessageHandler, SimpleArcPeerManager};
+use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::routing::network_graph::NetGraphMsgHandler;
use lightning::util::config::UserConfig;
use lightning::util::events::{Event, EventsProvider};
use lightning_net_tokio::SocketDescriptor;
use lightning_persister::FilesystemPersister;
use rand::{thread_rng, Rng};
+use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt;
use std::fs;
use std::sync::{Arc, Mutex};
use std::time::{Duration, SystemTime};
use tokio::sync::mpsc;
-
-#[derive(PartialEq)]
-pub(crate) enum HTLCDirection {
- Inbound,
- Outbound,
-}
+use tokio::sync::mpsc::Receiver;
pub(crate) enum HTLCStatus {
Pending,
pub(crate) struct PaymentInfo {
preimage: Option<PaymentPreimage>,
secret: Option<PaymentSecret>,
- direction: HTLCDirection,
status: HTLCStatus,
amt_msat: MillisatAmount,
}
type ChainMonitor = chainmonitor::ChainMonitor<
InMemorySigner,
- Arc<dyn Filter>,
+ Arc<dyn Filter + Send + Sync>,
Arc<BitcoindClient>,
Arc<BitcoindClient>,
Arc<FilesystemLogger>,
ChainMonitor,
BitcoindClient,
BitcoindClient,
- dyn chain::Access,
+ dyn chain::Access + Send + Sync,
FilesystemLogger,
>;
async fn handle_ldk_events(
channel_manager: Arc<ChannelManager>, chain_monitor: Arc<ChainMonitor>,
bitcoind_client: Arc<BitcoindClient>, keys_manager: Arc<KeysManager>,
- payment_storage: PaymentInfoStorage, network: Network,
+ inbound_payments: PaymentInfoStorage, outbound_payments: PaymentInfoStorage, network: Network,
+ mut event_receiver: Receiver<()>,
) {
loop {
+ let received = event_receiver.recv();
+ if received.await.is_none() {
+ println!("LDK Event channel closed!");
+ return;
+ }
let loop_channel_manager = channel_manager.clone();
let mut events = channel_manager.get_and_clear_pending_events();
events.append(&mut chain_monitor.get_and_clear_pending_events());
.funding_transaction_generated(&temporary_channel_id, final_tx)
.unwrap();
}
- Event::PaymentReceived { payment_hash, .. } => {
- let mut payments = payment_storage.lock().unwrap();
- if let Some(payment) = payments.get_mut(&payment_hash) {
- assert!(loop_channel_manager.claim_funds(
- payment.preimage.unwrap().clone(),
- &payment.secret,
- payment.amt_msat.0.unwrap(),
- ));
- println!(
- "\nEVENT: received payment from payment_hash {} of {} millisatoshis",
- hex_utils::hex_str(&payment_hash.0),
- payment.amt_msat
- );
- print!("> ");
- io::stdout().flush().unwrap();
- payment.status = HTLCStatus::Succeeded;
- } else {
- println!("\nERROR: we received a payment but didn't know the preimage");
- print!("> ");
- io::stdout().flush().unwrap();
- loop_channel_manager.fail_htlc_backwards(&payment_hash, &None);
- payments.insert(
- payment_hash,
- PaymentInfo {
- preimage: None,
- secret: None,
- direction: HTLCDirection::Inbound,
- status: HTLCStatus::Failed,
- amt_msat: MillisatAmount(None),
- },
- );
+ Event::PaymentReceived {
+ payment_hash,
+ payment_preimage,
+ payment_secret,
+ amt,
+ ..
+ } => {
+ let mut payments = inbound_payments.lock().unwrap();
+ let status = match loop_channel_manager.claim_funds(payment_preimage.unwrap()) {
+ true => {
+ println!(
+ "\nEVENT: received payment from payment hash {} of {} millisatoshis",
+ hex_utils::hex_str(&payment_hash.0),
+ amt
+ );
+ print!("> ");
+ io::stdout().flush().unwrap();
+ HTLCStatus::Succeeded
+ }
+ _ => HTLCStatus::Failed,
+ };
+ match payments.entry(payment_hash) {
+ Entry::Occupied(mut e) => {
+ let payment = e.get_mut();
+ payment.status = status;
+ payment.preimage = Some(payment_preimage.unwrap());
+ payment.secret = Some(payment_secret);
+ }
+ Entry::Vacant(e) => {
+ e.insert(PaymentInfo {
+ preimage: Some(payment_preimage.unwrap()),
+ secret: Some(payment_secret),
+ status,
+ amt_msat: MillisatAmount(Some(amt)),
+ });
+ }
}
}
Event::PaymentSent { payment_preimage } => {
let hashed = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
- let mut payments = payment_storage.lock().unwrap();
+ let mut payments = outbound_payments.lock().unwrap();
for (payment_hash, payment) in payments.iter_mut() {
if *payment_hash == hashed {
payment.preimage = Some(payment_preimage);
hex_utils::hex_str(&payment_hash.0)
);
if rejected_by_dest {
- println!("rejected by destination node");
+ println!("re-attempting the payment will not succeed");
} else {
- println!("route failed");
+ println!("payment may be retried");
}
print!("> ");
io::stdout().flush().unwrap();
- let mut payments = payment_storage.lock().unwrap();
+ let mut payments = outbound_payments.lock().unwrap();
if payments.contains_key(&payment_hash) {
let payment = payments.get_mut(&payment_hash).unwrap();
payment.status = HTLCStatus::Failed;
}
};
+ // Check that the bitcoind we've connected to is running the network we expect
+ let bitcoind_chain = bitcoind_client.get_blockchain_info().await.chain;
+ if bitcoind_chain
+ != match args.network {
+ bitcoin::Network::Bitcoin => "main",
+ bitcoin::Network::Testnet => "test",
+ bitcoin::Network::Regtest => "regtest",
+ bitcoin::Network::Signet => "signet",
+ } {
+ println!(
+ "Chain argument ({}) didn't match bitcoind chain ({})",
+ args.network, bitcoind_chain
+ );
+ return;
+ }
+
// ## Setup
// Step 1: Initialize the FeeEstimator
// Step 7: Read ChannelMonitor state from disk
let mut channelmonitors = persister.read_channelmonitors(keys_manager.clone()).unwrap();
- // Step 9: Initialize the ChannelManager
+ // Step 8: Initialize the ChannelManager
let user_config = UserConfig::default();
let mut restarting_node = true;
let (channel_manager_blockhash, mut channel_manager) = {
let chain_params = ChainParameters {
network: args.network,
- latest_hash: getinfo_resp.latest_blockhash,
- latest_height: getinfo_resp.latest_height,
+ best_block: BestBlock::new(
+ getinfo_resp.latest_blockhash,
+ getinfo_resp.latest_height as u32,
+ ),
};
let fresh_channel_manager = channelmanager::ChannelManager::new(
fee_estimator.clone(),
}
};
- // Step 10: Sync ChannelMonitors and ChannelManager to chain tip
+ // Step 9: Sync ChannelMonitors and ChannelManager to chain tip
let mut chain_listener_channel_monitors = Vec::new();
let mut cache = UnboundedCache::new();
let mut chain_tip: Option<poll::ValidatedBlockHeader> = None;
);
}
- // Step 11: Give ChannelMonitors to ChainMonitor
+ // Step 10: Give ChannelMonitors to ChainMonitor
for item in chain_listener_channel_monitors.drain(..) {
let channel_monitor = item.1 .0;
let funding_outpoint = item.2;
chain_monitor.watch_channel(funding_outpoint, channel_monitor).unwrap();
}
- // Step 13: Optional: Initialize the NetGraphMsgHandler
+ // Step 11: Optional: Initialize the NetGraphMsgHandler
// XXX persist routing data
let genesis = genesis_block(args.network).header.block_hash();
- let router =
- Arc::new(NetGraphMsgHandler::new(genesis, None::<Arc<dyn chain::Access>>, logger.clone()));
+ let router = Arc::new(NetGraphMsgHandler::new(
+ genesis,
+ None::<Arc<dyn chain::Access + Send + Sync>>,
+ logger.clone(),
+ ));
- // Step 14: Initialize the PeerManager
+ // Step 12: Initialize the PeerManager
let channel_manager: Arc<ChannelManager> = Arc::new(channel_manager);
let mut ephemeral_bytes = [0; 32];
rand::thread_rng().fill_bytes(&mut ephemeral_bytes);
));
// ## Running LDK
- // Step 16: Initialize Peer Connection Handling
+ // Step 13: Initialize networking
// handle_ldk_events(..) blocks on this mpsc::channel's receiver at the top of each loop
// iteration, so the receiver is passed into it and awaited rather than left unused.
- let (event_ntfn_sender, _event_ntfn_receiver) = mpsc::channel(2);
+ let (event_ntfn_sender, event_ntfn_receiver) = mpsc::channel(2);
let peer_manager_connection_handler = peer_manager.clone();
let event_notifier = event_ntfn_sender.clone();
let listening_port = args.ldk_peer_listening_port;
tokio::spawn(async move {
- let listener = std::net::TcpListener::bind(format!("0.0.0.0:{}", listening_port)).unwrap();
+ let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", listening_port))
+ .await
+ .expect("Failed to bind to listen port - is something else already listening on it?");
loop {
- let tcp_stream = listener.accept().unwrap().0;
- lightning_net_tokio::setup_inbound(
- peer_manager_connection_handler.clone(),
- event_notifier.clone(),
- tcp_stream,
- )
- .await;
+ let peer_mgr = peer_manager_connection_handler.clone();
+ let notifier = event_notifier.clone();
+ let tcp_stream = listener.accept().await.unwrap().0;
+ tokio::spawn(async move {
+ lightning_net_tokio::setup_inbound(
+ peer_mgr.clone(),
+ notifier.clone(),
+ tcp_stream.into_std().unwrap(),
+ )
+ .await;
+ });
}
});
- // Step 17: Connect and Disconnect Blocks
+ // Step 14: Connect and Disconnect Blocks
if chain_tip.is_none() {
chain_tip =
Some(init::validate_best_block_header(&mut bitcoind_client.deref()).await.unwrap());
}
});
- // Step 17 & 18: Initialize ChannelManager persistence & Once Per Minute: ChannelManager's
- // timer_chan_freshness_every_min() and PeerManager's timer_tick_occurred
- let data_dir = ldk_data_dir.clone();
- let persist_channel_manager_callback =
- move |node: &ChannelManager| FilesystemPersister::persist_manager(data_dir.clone(), &*node);
- BackgroundProcessor::start(
- persist_channel_manager_callback,
- channel_manager.clone(),
- peer_manager.clone(),
- logger.clone(),
- );
-
// Step 15: Initialize LDK Event Handling
let channel_manager_event_listener = channel_manager.clone();
let chain_monitor_event_listener = chain_monitor.clone();
let keys_manager_listener = keys_manager.clone();
// TODO: persist payment info to disk
- let payment_info: PaymentInfoStorage = Arc::new(Mutex::new(HashMap::new()));
- let payment_info_for_events = payment_info.clone();
+ let inbound_payments: PaymentInfoStorage = Arc::new(Mutex::new(HashMap::new()));
+ let outbound_payments: PaymentInfoStorage = Arc::new(Mutex::new(HashMap::new()));
+ let inbound_pmts_for_events = inbound_payments.clone();
+ let outbound_pmts_for_events = outbound_payments.clone();
let network = args.network;
let bitcoind_rpc = bitcoind_client.clone();
tokio::spawn(async move {
chain_monitor_event_listener,
bitcoind_rpc,
keys_manager_listener,
- payment_info_for_events,
+ inbound_pmts_for_events,
+ outbound_pmts_for_events,
network,
+ event_ntfn_receiver,
)
.await;
});
+ // Step 16 & 17: Persist ChannelManager & Background Processing
+ let data_dir = ldk_data_dir.clone();
+ let persist_channel_manager_callback =
+ move |node: &ChannelManager| FilesystemPersister::persist_manager(data_dir.clone(), &*node);
+ BackgroundProcessor::start(
+ persist_channel_manager_callback,
+ channel_manager.clone(),
+ peer_manager.clone(),
+ logger.clone(),
+ );
+
// Reconnect to channel peers if possible.
let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir.clone());
match disk::read_channel_peer_data(Path::new(&peer_data_path)) {
peer_addr,
peer_manager.clone(),
event_ntfn_sender.clone(),
- );
+ )
+ .await;
}
}
}
Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e),
}
+ // Regularly broadcast our node_announcement. This is only required (or possible) if we have
+ // some public channels, and is only useful if we have public listen address(es) to announce.
+ // In a production environment, this should occur only after the announcement of new channels
+ // to avoid churn in the global network graph.
+ let chan_manager = Arc::clone(&channel_manager);
+ let network = args.network;
+ if args.ldk_announced_listen_addr.is_some() {
+ tokio::spawn(async move {
+ let mut interval = tokio::time::interval(Duration::from_secs(60));
+ loop {
+ interval.tick().await;
+ chan_manager.broadcast_node_announcement(
+ [0; 3],
+ args.ldk_announced_node_name,
+ vec![args.ldk_announced_listen_addr.as_ref().unwrap().clone()],
+ );
+ }
+ });
+ }
+
// Start the CLI.
cli::poll_for_user_input(
peer_manager.clone(),
channel_manager.clone(),
+ keys_manager.clone(),
router.clone(),
- payment_info,
- keys_manager.get_node_secret(),
+ inbound_payments,
+ outbound_payments,
event_ntfn_sender,
ldk_data_dir.clone(),
logger.clone(),
- args.network,
+ network,
)
.await;
}