Print our peers' aliases in `listchannels`
diff --git a/src/main.rs b/src/main.rs
index b81e652b1906aad8012cc29b2b103cbdba6295d3..ad48efd4b1d9afcb45c3ad9d989411bc23871790 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -16,7 +16,7 @@ use bitcoin_bech32::WitnessProgram;
 use lightning::chain;
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::chainmonitor;
-use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager};
+use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient};
 use lightning::chain::{BestBlock, Filter, Watch};
 use lightning::ln::channelmanager;
 use lightning::ln::channelmanager::{
@@ -24,7 +24,8 @@ use lightning::ln::channelmanager::{
 };
 use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager};
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use lightning::routing::network_graph::NetGraphMsgHandler;
+use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
+use lightning::routing::scoring::ProbabilisticScorer;
 use lightning::util::config::UserConfig;
 use lightning::util::events::{Event, PaymentPurpose};
 use lightning::util::ser::ReadableArgs;
@@ -33,6 +34,8 @@ use lightning_block_sync::init;
 use lightning_block_sync::poll;
 use lightning_block_sync::SpvClient;
 use lightning_block_sync::UnboundedCache;
+use lightning_invoice::payment;
+use lightning_invoice::utils::DefaultRouter;
 use lightning_net_tokio::SocketDescriptor;
 use lightning_persister::FilesystemPersister;
 use rand::{thread_rng, Rng};
@@ -45,6 +48,7 @@ use std::io;
 use std::io::Write;
 use std::ops::Deref;
 use std::path::Path;
+use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::{Duration, SystemTime};
 
@@ -95,6 +99,16 @@ pub(crate) type PeerManager = SimpleArcPeerManager<
 pub(crate) type ChannelManager =
        SimpleArcChannelManager<ChainMonitor, BitcoindClient, BitcoindClient, FilesystemLogger>;
 
+pub(crate) type InvoicePayer<E> = payment::InvoicePayer<
+       Arc<ChannelManager>,
+       Router,
+       Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph>>>>,
+       Arc<FilesystemLogger>,
+       E,
+>;
+
+type Router = DefaultRouter<Arc<NetworkGraph>, Arc<FilesystemLogger>>;
+
 async fn handle_ldk_events(
        channel_manager: Arc<ChannelManager>, bitcoind_client: Arc<BitcoindClient>,
        keys_manager: Arc<KeysManager>, inbound_payments: PaymentInfoStorage,
@@ -115,7 +129,7 @@ async fn handle_ldk_events(
                                        Network::Bitcoin => bitcoin_bech32::constants::Network::Bitcoin,
                                        Network::Testnet => bitcoin_bech32::constants::Network::Testnet,
                                        Network::Regtest => bitcoin_bech32::constants::Network::Regtest,
-                                       Network::Signet => panic!("Signet unsupported"),
+                                       Network::Signet => bitcoin_bech32::constants::Network::Signet,
                                },
                        )
                        .expect("Lightning funding tx should always be to a SegWit output")
@@ -182,16 +196,21 @@ async fn handle_ldk_events(
                                }
                        }
                }
-               Event::PaymentSent { payment_preimage, payment_hash, .. } => {
+               Event::PaymentSent { payment_preimage, payment_hash, fee_paid_msat, .. } => {
                        let mut payments = outbound_payments.lock().unwrap();
                        for (hash, payment) in payments.iter_mut() {
                                if *hash == *payment_hash {
                                        payment.preimage = Some(*payment_preimage);
                                        payment.status = HTLCStatus::Succeeded;
                                        println!(
-                                               "\nEVENT: successfully sent payment of {} millisatoshis from \
+                                               "\nEVENT: successfully sent payment of {} millisatoshis{} from \
                                                                 payment hash {:?} with preimage {:?}",
                                                payment.amt_msat,
+                                               if let Some(fee) = fee_paid_msat {
+                                                       format!(" (fee {} msat)", fee)
+                                               } else {
+                                                       "".to_string()
+                                               },
                                                hex_utils::hex_str(&payment_hash.0),
                                                hex_utils::hex_str(&payment_preimage.0)
                                        );
@@ -200,28 +219,16 @@ async fn handle_ldk_events(
                                }
                        }
                }
-               Event::PaymentPathFailed {
-                       payment_hash,
-                       rejected_by_dest,
-                       network_update: _,
-                       all_paths_failed,
-                       path: _,
-                       short_channel_id,
-                       ..
-               } => {
+               Event::OpenChannelRequest { .. } => {
+                       // Unreachable, we don't set manually_accept_inbound_channels
+               }
+               Event::PaymentPathSuccessful { .. } => {}
+               Event::PaymentPathFailed { .. } => {}
+               Event::PaymentFailed { payment_hash, .. } => {
                        print!(
-                               "\nEVENT: Failed to send payment{} to payment hash {:?}",
-                               if *all_paths_failed { "" } else { " along MPP path" },
+                               "\nEVENT: Failed to send payment to payment hash {:?}: exhausted payment retry attempts",
                                hex_utils::hex_str(&payment_hash.0)
                        );
-                       if let Some(scid) = short_channel_id {
-                               print!(" because of failure at channel {}", scid);
-                       }
-                       if *rejected_by_dest {
-                               println!(": re-attempting the payment will not succeed");
-                       } else {
-                               println!(": payment may be retried");
-                       }
                        print!("> ");
                        io::stdout().flush().unwrap();
 
@@ -394,7 +401,7 @@ async fn start_ldk() {
        let mut user_config = UserConfig::default();
        user_config.peer_channel_config_limits.force_announced_channel_preference = false;
        let mut restarting_node = true;
-       let (channel_manager_blockhash, mut channel_manager) = {
+       let (channel_manager_blockhash, channel_manager) = {
                if let Ok(mut f) = fs::File::open(format!("{}/manager", ldk_data_dir.clone())) {
                        let mut channel_monitor_mut_references = Vec::new();
                        for (_, channel_monitor) in channelmonitors.iter_mut() {
@@ -441,7 +448,7 @@ async fn start_ldk() {
        let mut chain_tip: Option<poll::ValidatedBlockHeader> = None;
        if restarting_node {
                let mut chain_listeners =
-                       vec![(channel_manager_blockhash, &mut channel_manager as &mut dyn chain::Listen)];
+                       vec![(channel_manager_blockhash, &channel_manager as &dyn chain::Listen)];
 
                for (blockhash, channel_monitor) in channelmonitors.drain(..) {
                        let outpoint = channel_monitor.get_funding_txo().0;
@@ -453,10 +460,8 @@ async fn start_ldk() {
                }
 
                for monitor_listener_info in chain_listener_channel_monitors.iter_mut() {
-                       chain_listeners.push((
-                               monitor_listener_info.0,
-                               &mut monitor_listener_info.1 as &mut dyn chain::Listen,
-                       ));
+                       chain_listeners
+                               .push((monitor_listener_info.0, &monitor_listener_info.1 as &dyn chain::Listen));
                }
                chain_tip = Some(
                        init::synchronize_listeners(
@@ -513,7 +518,7 @@ async fn start_ldk() {
        };
        let peer_manager: Arc<PeerManager> = Arc::new(PeerManager::new(
                lightning_msg_handler,
-               keys_manager.get_node_secret(),
+               keys_manager.get_node_secret(Recipient::Node).unwrap(),
                &ephemeral_bytes,
                logger.clone(),
                Arc::new(IgnoringMessageHandler {}),
@@ -524,6 +529,8 @@ async fn start_ldk() {
 
        let peer_manager_connection_handler = peer_manager.clone();
        let listening_port = args.ldk_peer_listening_port;
+       let stop_listen = Arc::new(AtomicBool::new(false));
+       let stop_listen_ref = Arc::clone(&stop_listen);
        tokio::spawn(async move {
                let listener = tokio::net::TcpListener::bind(format!("0.0.0.0:{}", listening_port))
                        .await
@@ -531,6 +538,9 @@ async fn start_ldk() {
                loop {
                        let peer_mgr = peer_manager_connection_handler.clone();
                        let tcp_stream = listener.accept().await.unwrap().0;
+                       if stop_listen_ref.load(Ordering::Acquire) {
+                               return;
+                       }
                        tokio::spawn(async move {
                                lightning_net_tokio::setup_inbound(
                                        peer_mgr.clone(),
@@ -584,14 +594,48 @@ async fn start_ldk() {
                        event,
                ));
        };
-       // Step 16: Persist ChannelManager
+
+       // Step 16: Initialize routing ProbabilisticScorer
+       let scorer_path = format!("{}/prob_scorer", ldk_data_dir.clone());
+       let scorer = Arc::new(Mutex::new(disk::read_scorer(
+               Path::new(&scorer_path),
+               Arc::clone(&network_graph),
+       )));
+       let scorer_persist = Arc::clone(&scorer);
+       tokio::spawn(async move {
+               let mut interval = tokio::time::interval(Duration::from_secs(600));
+               loop {
+                       interval.tick().await;
+                       if disk::persist_scorer(Path::new(&scorer_path), &scorer_persist.lock().unwrap())
+                               .is_err()
+                       {
+                               // Persistence errors here are non-fatal as channels will be re-scored as payments
+                               // fail, but they may indicate a disk error which could be fatal elsewhere.
+                               eprintln!("Warning: Failed to persist scorer, check your disk and permissions");
+                       }
+               }
+       });
+
+       // Step 17: Create InvoicePayer
+       let router = DefaultRouter::new(network_graph.clone(), logger.clone());
+       let invoice_payer = Arc::new(InvoicePayer::new(
+               channel_manager.clone(),
+               router,
+               scorer.clone(),
+               logger.clone(),
+               event_handler,
+               payment::RetryAttempts(5),
+       ));
+
+       // Step 18: Persist ChannelManager
        let data_dir = ldk_data_dir.clone();
        let persist_channel_manager_callback =
                move |node: &ChannelManager| FilesystemPersister::persist_manager(data_dir.clone(), &*node);
-       // Step 17: Background Processing
+
+       // Step 19: Background Processing
        let background_processor = BackgroundProcessor::start(
                persist_channel_manager_callback,
-               event_handler,
+               invoice_payer.clone(),
                chain_monitor.clone(),
                channel_manager.clone(),
                Some(network_gossip.clone()),
@@ -599,22 +643,39 @@ async fn start_ldk() {
                logger.clone(),
        );
 
-       // Reconnect to channel peers if possible.
+       // Regularly reconnect to channel peers.
+       let connect_cm = Arc::clone(&channel_manager);
+       let connect_pm = Arc::clone(&peer_manager);
        let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir.clone());
-       match disk::read_channel_peer_data(Path::new(&peer_data_path)) {
-               Ok(mut info) => {
-                       for (pubkey, peer_addr) in info.drain() {
-                               for chan_info in channel_manager.list_channels() {
-                                       if pubkey == chan_info.counterparty.node_id {
-                                               let _ =
-                                                       cli::connect_peer_if_necessary(pubkey, peer_addr, peer_manager.clone())
+       tokio::spawn(async move {
+               let mut interval = tokio::time::interval(Duration::from_secs(1));
+               loop {
+                       interval.tick().await;
+                       match disk::read_channel_peer_data(Path::new(&peer_data_path)) {
+                               Ok(info) => {
+                                       let peers = connect_pm.get_peer_node_ids();
+                                       for node_id in connect_cm
+                                               .list_channels()
+                                               .iter()
+                                               .map(|chan| chan.counterparty.node_id)
+                                               .filter(|id| !peers.contains(id))
+                                       {
+                                               for (pubkey, peer_addr) in info.iter() {
+                                                       if *pubkey == node_id {
+                                                               let _ = cli::do_connect_peer(
+                                                                       *pubkey,
+                                                                       peer_addr.clone(),
+                                                                       Arc::clone(&connect_pm),
+                                                               )
                                                                .await;
+                                                       }
+                                               }
                                        }
                                }
+                               Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e),
                        }
                }
-               Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e),
-       }
+       });
 
        // Regularly broadcast our node_announcement. This is only required (or possible) if we have
        // some public channels, and is only useful if we have public listen address(es) to announce.
@@ -638,18 +699,23 @@ async fn start_ldk() {
 
        // Start the CLI.
        cli::poll_for_user_input(
-               peer_manager.clone(),
-               channel_manager.clone(),
-               keys_manager.clone(),
-               network_graph.clone(),
+               Arc::clone(&invoice_payer),
+               Arc::clone(&peer_manager),
+               Arc::clone(&channel_manager),
+               Arc::clone(&keys_manager),
+               Arc::clone(&network_graph),
                inbound_payments,
                outbound_payments,
                ldk_data_dir.clone(),
-               logger.clone(),
                network,
        )
        .await;
 
+       // Disconnect our peers and stop accepting new connections. This ensures we don't continue
+       // updating our channel data after we've stopped the background processor.
+       stop_listen.store(true, Ordering::Release);
+       peer_manager.disconnect_all_peers();
+
        // Stop the background processor.
        background_processor.stop().unwrap();
 }
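
Not part of this diff, but for context: the `InvoicePayer<E>` alias added above is what the CLI ends up calling to actually send payments. Below is a minimal sketch of such a caller, assuming a hypothetical helper name `pay_bolt11_invoice` defined in the same crate (so it can see the `pub(crate)` alias) and an illustrative error mapping; it is not the sample's own cli.rs code.

	// Sketch only (not part of this commit). Assumes the crate-level
	// `InvoicePayer<E>` alias defined in main.rs above.
	use lightning::util::events::EventHandler;
	use lightning_invoice::payment::PaymentError;
	use lightning_invoice::Invoice;

	fn pay_bolt11_invoice<E: EventHandler>(
		invoice_payer: &InvoicePayer<E>, invoice_str: &str,
	) -> Result<(), PaymentError> {
		// Parse the BOLT11 string; mapping parse/semantic errors into
		// `PaymentError::Invoice` here is just to keep a single error type
		// for the sketch.
		let invoice = invoice_str
			.parse::<Invoice>()
			.map_err(|_| PaymentError::Invoice("invalid invoice"))?;
		// The payer routes via the DefaultRouter, scores candidate channels
		// with the ProbabilisticScorer, and retries failed paths internally.
		invoice_payer.pay_invoice(&invoice).map(|_payment_id| ())
	}

`pay_invoice` routes with the `DefaultRouter` and the `ProbabilisticScorer` persisted in Step 16, retrying failed paths up to the `RetryAttempts(5)` configured in Step 17 before surfacing `Event::PaymentFailed`, which the updated event handler above prints.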