X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fmain.rs;h=39237d844d6969ddc341d8b5c2f802b423f4ba51;hb=7d0af178ba9a79008af7b3ea89e563749f8f7252;hp=b81e652b1906aad8012cc29b2b103cbdba6295d3;hpb=cd4e88ff77a35a586bbb03f1059870244dbea52f;p=ldk-sample

diff --git a/src/main.rs b/src/main.rs
index b81e652..39237d8 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -24,7 +24,8 @@ use lightning::ln::channelmanager::{
 };
 use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager};
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use lightning::routing::network_graph::NetGraphMsgHandler;
+use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
+use lightning::routing::scorer::Scorer;
 use lightning::util::config::UserConfig;
 use lightning::util::events::{Event, PaymentPurpose};
 use lightning::util::ser::ReadableArgs;
@@ -33,6 +34,8 @@ use lightning_block_sync::init;
 use lightning_block_sync::poll;
 use lightning_block_sync::SpvClient;
 use lightning_block_sync::UnboundedCache;
+use lightning_invoice::payment;
+use lightning_invoice::utils::DefaultRouter;
 use lightning_net_tokio::SocketDescriptor;
 use lightning_persister::FilesystemPersister;
 use rand::{thread_rng, Rng};
@@ -95,6 +98,16 @@ pub(crate) type PeerManager = SimpleArcPeerManager<
 pub(crate) type ChannelManager =
 	SimpleArcChannelManager<ChainMonitor, BitcoindClient, BitcoindClient, FilesystemLogger>;
 
+pub(crate) type InvoicePayer<E> = payment::InvoicePayer<
+	Arc<ChannelManager>,
+	Router,
+	Arc<Mutex<Scorer>>,
+	Arc<FilesystemLogger>,
+	E,
+>;
+
+type Router = DefaultRouter<Arc<NetworkGraph>, Arc<FilesystemLogger>>;
+
 async fn handle_ldk_events(
 	channel_manager: Arc<ChannelManager>, bitcoind_client: Arc<BitcoindClient>,
 	keys_manager: Arc<KeysManager>, inbound_payments: PaymentInfoStorage,
@@ -203,9 +216,7 @@
 		Event::PaymentPathFailed {
 			payment_hash,
 			rejected_by_dest,
-			network_update: _,
 			all_paths_failed,
-			path: _,
 			short_channel_id,
 			..
 		} => {
@@ -220,7 +231,7 @@
 			if *rejected_by_dest {
 				println!(": re-attempting the payment will not succeed");
 			} else {
				println!(": exhausted payment retry attempts");
 			}
 			print!("> ");
 			io::stdout().flush().unwrap();
@@ -584,14 +595,45 @@ async fn start_ldk() {
 			event,
 		));
 	};
-	// Step 16: Persist ChannelManager
+
+	// Step 16: Initialize routing Scorer
+	let scorer_path = format!("{}/scorer", ldk_data_dir.clone());
+	let scorer = Arc::new(Mutex::new(disk::read_scorer(Path::new(&scorer_path))));
+	let scorer_persist = Arc::clone(&scorer);
+	tokio::spawn(async move {
+		let mut interval = tokio::time::interval(Duration::from_secs(600));
+		loop {
+			interval.tick().await;
+			if disk::persist_scorer(Path::new(&scorer_path), &scorer_persist.lock().unwrap())
+				.is_err()
+			{
+				// Persistence errors here are non-fatal as channels will be re-scored as payments
+				// fail, but they may indicate a disk error which could be fatal elsewhere.
+				eprintln!("Warning: Failed to persist scorer, check your disk and permissions");
+			}
+		}
+	});
+
+	// Step 17: Create InvoicePayer
+	let router = DefaultRouter::new(network_graph.clone(), logger.clone());
+	let invoice_payer = Arc::new(InvoicePayer::new(
+		channel_manager.clone(),
+		router,
+		scorer.clone(),
+		logger.clone(),
+		event_handler,
+		payment::RetryAttempts(5),
+	));
+
+	// Step 18: Persist ChannelManager
 	let data_dir = ldk_data_dir.clone();
 	let persist_channel_manager_callback =
 		move |node: &ChannelManager| FilesystemPersister::persist_manager(data_dir.clone(), &*node);
-	// Step 17: Background Processing
+
+	// Step 19: Background Processing
 	let background_processor = BackgroundProcessor::start(
 		persist_channel_manager_callback,
-		event_handler,
+		invoice_payer.clone(),
 		chain_monitor.clone(),
 		channel_manager.clone(),
 		Some(network_gossip.clone()),
@@ -599,22 +641,39 @@
 		logger.clone(),
 	);
 
-	// Reconnect to channel peers if possible.
+	// Regularly reconnect to channel peers.
+	let connect_cm = Arc::clone(&channel_manager);
+	let connect_pm = Arc::clone(&peer_manager);
 	let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir.clone());
-	match disk::read_channel_peer_data(Path::new(&peer_data_path)) {
-		Ok(mut info) => {
-			for (pubkey, peer_addr) in info.drain() {
-				for chan_info in channel_manager.list_channels() {
-					if pubkey == chan_info.counterparty.node_id {
-						let _ =
-							cli::connect_peer_if_necessary(pubkey, peer_addr, peer_manager.clone())
+	tokio::spawn(async move {
+		let mut interval = tokio::time::interval(Duration::from_secs(1));
+		loop {
+			interval.tick().await;
+			match disk::read_channel_peer_data(Path::new(&peer_data_path)) {
+				Ok(info) => {
+					let peers = connect_pm.get_peer_node_ids();
+					for node_id in connect_cm
+						.list_channels()
+						.iter()
+						.map(|chan| chan.counterparty.node_id)
+						.filter(|id| !peers.contains(id))
+					{
+						for (pubkey, peer_addr) in info.iter() {
+							if *pubkey == node_id {
+								let _ = cli::do_connect_peer(
+									*pubkey,
+									peer_addr.clone(),
+									Arc::clone(&connect_pm),
+								)
 								.await;
+							}
+						}
 					}
 				}
+				Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e),
 			}
 		}
-		Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e),
-	}
+	});
 
 	// Regularly broadcast our node_announcement. This is only required (or possible) if we have
 	// some public channels, and is only useful if we have public listen address(es) to announce.
@@ -638,10 +697,12 @@
 
 	// Start the CLI.
 	cli::poll_for_user_input(
+		invoice_payer.clone(),
 		peer_manager.clone(),
 		channel_manager.clone(),
 		keys_manager.clone(),
 		network_graph.clone(),
+		scorer.clone(),
 		inbound_payments,
 		outbound_payments,
 		ldk_data_dir.clone(),
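
For reference: the InvoicePayer added in this diff wraps the ChannelManager as the payer and DefaultRouter as the router, scores candidate channels through the shared Mutex<Scorer>, and retries failed paths internally (up to payment::RetryAttempts(5)) before forwarding events to the wrapped event handler, which is why the PaymentPathFailed message changes from "payment may be retried" to "exhausted payment retry attempts". Below is a minimal sketch of how a caller such as cli.rs might drive it to pay a BOLT11 invoice, assuming the lightning-invoice 0.8 payment API; the helper name send_payment_sketch is hypothetical and not code from this commit.

// Illustrative sketch only, not part of this commit. Assumes the
// InvoicePayer<E> alias defined in this patch and the lightning-invoice 0.8
// payment API; send_payment_sketch itself is a hypothetical helper.
use std::str::FromStr;

use lightning::util::events::EventHandler;
use lightning_invoice::Invoice;

fn send_payment_sketch<E: EventHandler>(invoice_payer: &InvoicePayer<E>, invoice_str: &str) {
	// Parse the BOLT11 invoice string.
	let invoice = match Invoice::from_str(invoice_str) {
		Ok(invoice) => invoice,
		Err(e) => {
			println!("ERROR: invalid invoice: {:?}", e);
			return;
		}
	};
	match invoice_payer.pay_invoice(&invoice) {
		Ok(_) => println!("EVENT: initiated sending payment"),
		// An error here means the payment could not be initiated (bad invoice,
		// no route, or a send failure). Path failures after this point are
		// retried by the payer and only reach the event handler, and thus the
		// "exhausted payment retry attempts" message, once the retry budget is
		// spent.
		Err(e) => println!("ERROR: failed to send payment: {:?}", e),
	}
}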