X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=src%2Fmain.rs;h=39237d844d6969ddc341d8b5c2f802b423f4ba51;hb=7d0af178ba9a79008af7b3ea89e563749f8f7252;hp=4073be9ea73faa4395955d00b8f1693f3a002405;hpb=90c12f0f24689326ae09637a9ad620f781d97e5c;p=ldk-sample

diff --git a/src/main.rs b/src/main.rs
index 4073be9..39237d8 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -9,8 +9,6 @@ use crate::disk::FilesystemLogger;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::blockdata::transaction::Transaction;
 use bitcoin::consensus::encode;
-use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::hashes::Hash;
 use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::BlockHash;
@@ -24,17 +22,20 @@ use lightning::ln::channelmanager;
 use lightning::ln::channelmanager::{
 	ChainParameters, ChannelManagerReadArgs, SimpleArcChannelManager,
 };
-use lightning::ln::peer_handler::{MessageHandler, SimpleArcPeerManager};
+use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager};
 use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use lightning::routing::network_graph::NetGraphMsgHandler;
+use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
+use lightning::routing::scorer::Scorer;
 use lightning::util::config::UserConfig;
-use lightning::util::events::Event;
+use lightning::util::events::{Event, PaymentPurpose};
 use lightning::util::ser::ReadableArgs;
 use lightning_background_processor::BackgroundProcessor;
 use lightning_block_sync::init;
 use lightning_block_sync::poll;
 use lightning_block_sync::SpvClient;
 use lightning_block_sync::UnboundedCache;
+use lightning_invoice::payment;
+use lightning_invoice::utils::DefaultRouter;
 use lightning_net_tokio::SocketDescriptor;
 use lightning_persister::FilesystemPersister;
 use rand::{thread_rng, Rng};
@@ -97,10 +98,20 @@ pub(crate) type PeerManager = SimpleArcPeerManager<
 pub(crate) type ChannelManager =
 	SimpleArcChannelManager<ChainMonitor, BitcoindClient, BitcoindClient, FilesystemLogger>;
 
+pub(crate) type InvoicePayer<E> = payment::InvoicePayer<
+	Arc<ChannelManager>,
+	Router,
+	Arc<Mutex<Scorer>>,
+	Arc<FilesystemLogger>,
+	E,
+>;
+
+type Router = DefaultRouter<Arc<NetworkGraph>, Arc<FilesystemLogger>>;
+
 async fn handle_ldk_events(
 	channel_manager: Arc<ChannelManager>, bitcoind_client: Arc<BitcoindClient>,
 	keys_manager: Arc<KeysManager>, inbound_payments: PaymentInfoStorage,
-	outbound_payments: PaymentInfoStorage, network: Network, event: Event,
+	outbound_payments: PaymentInfoStorage, network: Network, event: &Event,
 ) {
 	match event {
 		Event::FundingGenerationReady {
@@ -123,14 +134,12 @@ async fn handle_ldk_events(
 			.expect("Lightning funding tx should always be to a SegWit output")
 			.to_address();
 			let mut outputs = vec![HashMap::with_capacity(1)];
-			outputs[0].insert(addr, channel_value_satoshis as f64 / 100_000_000.0);
+			outputs[0].insert(addr, *channel_value_satoshis as f64 / 100_000_000.0);
 			let raw_tx = bitcoind_client.create_raw_transaction(outputs).await;
 
 			// Have your wallet put the inputs into the transaction such that the output is
 			// satisfied.
 			let funded_tx = bitcoind_client.fund_raw_transaction(raw_tx).await;
-			let change_output_position = funded_tx.changepos;
-			assert!(change_output_position == 0 || change_output_position == 1);
 
 			// Sign the final funding transaction and broadcast it.
 			let signed_tx = bitcoind_client.sign_raw_transaction_with_wallet(funded_tx.hex).await;
@@ -148,8 +157,14 @@ async fn handle_ldk_events(
 				io::stdout().flush().unwrap();
 			}
 		}
-		Event::PaymentReceived { payment_hash, payment_preimage, payment_secret, amt, .. } => {
+		Event::PaymentReceived { payment_hash, purpose, amt, .. 
} => { let mut payments = inbound_payments.lock().unwrap(); + let (payment_preimage, payment_secret) = match purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + (*payment_preimage, Some(*payment_secret)) + } + PaymentPurpose::SpontaneousPayment(preimage) => (Some(*preimage), None), + }; let status = match channel_manager.claim_funds(payment_preimage.unwrap()) { true => { println!( @@ -163,29 +178,28 @@ async fn handle_ldk_events( } _ => HTLCStatus::Failed, }; - match payments.entry(payment_hash) { + match payments.entry(*payment_hash) { Entry::Occupied(mut e) => { let payment = e.get_mut(); payment.status = status; - payment.preimage = Some(payment_preimage.unwrap()); - payment.secret = Some(payment_secret); + payment.preimage = payment_preimage; + payment.secret = payment_secret; } Entry::Vacant(e) => { e.insert(PaymentInfo { - preimage: Some(payment_preimage.unwrap()), - secret: Some(payment_secret), + preimage: payment_preimage, + secret: payment_secret, status, - amt_msat: MillisatAmount(Some(amt)), + amt_msat: MillisatAmount(Some(*amt)), }); } } } - Event::PaymentSent { payment_preimage } => { - let hashed = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + Event::PaymentSent { payment_preimage, payment_hash, .. } => { let mut payments = outbound_payments.lock().unwrap(); - for (payment_hash, payment) in payments.iter_mut() { - if *payment_hash == hashed { - payment.preimage = Some(payment_preimage); + for (hash, payment) in payments.iter_mut() { + if *hash == *payment_hash { + payment.preimage = Some(*payment_preimage); payment.status = HTLCStatus::Succeeded; println!( "\nEVENT: successfully sent payment of {} millisatoshis from \ @@ -199,15 +213,25 @@ async fn handle_ldk_events( } } } - Event::PaymentFailed { payment_hash, rejected_by_dest } => { + Event::PaymentPathFailed { + payment_hash, + rejected_by_dest, + all_paths_failed, + short_channel_id, + .. 
+ } => { print!( - "\nEVENT: Failed to send payment to payment hash {:?}: ", + "\nEVENT: Failed to send payment{} to payment hash {:?}", + if *all_paths_failed { "" } else { " along MPP path" }, hex_utils::hex_str(&payment_hash.0) ); - if rejected_by_dest { - println!("re-attempting the payment will not succeed"); + if let Some(scid) = short_channel_id { + print!(" because of failure at channel {}", scid); + } + if *rejected_by_dest { + println!(": re-attempting the payment will not succeed"); } else { - println!("payment may be retried"); + println!(": exhausted payment retry attempts"); } print!("> "); io::stdout().flush().unwrap(); @@ -218,10 +242,27 @@ async fn handle_ldk_events( payment.status = HTLCStatus::Failed; } } + Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx } => { + let from_onchain_str = if *claim_from_onchain_tx { + "from onchain downstream claim" + } else { + "from HTLC fulfill message" + }; + if let Some(fee_earned) = fee_earned_msat { + println!( + "\nEVENT: Forwarded payment, earning {} msat {}", + fee_earned, from_onchain_str + ); + } else { + println!("\nEVENT: Forwarded payment, claiming onchain {}", from_onchain_str); + } + print!("> "); + io::stdout().flush().unwrap(); + } Event::PendingHTLCsForwardable { time_forwardable } => { let forwarding_channel_manager = channel_manager.clone(); + let min = time_forwardable.as_millis() as u64; tokio::spawn(async move { - let min = time_forwardable.as_millis() as u64; let millis_to_sleep = thread_rng().gen_range(min, min * 5) as u64; tokio::time::sleep(Duration::from_millis(millis_to_sleep)).await; forwarding_channel_manager.process_pending_htlc_forwards(); @@ -243,6 +284,19 @@ async fn handle_ldk_events( .unwrap(); bitcoind_client.broadcast_transaction(&spending_tx); } + Event::ChannelClosed { channel_id, reason, user_channel_id: _ } => { + println!( + "\nEVENT: Channel {} closed due to: {:?}", + hex_utils::hex_str(channel_id), + reason + ); + print!("> "); + io::stdout().flush().unwrap(); + } + Event::DiscardFunding { .. } => { + // A "real" node should probably "lock" the UTXOs spent in funding transactions until + // the funding transaction either confirms, or this event is generated. 
+		}
 	}
 }
 
@@ -262,6 +316,7 @@ async fn start_ldk() {
 		args.bitcoind_rpc_port,
 		args.bitcoind_rpc_username.clone(),
 		args.bitcoind_rpc_password.clone(),
+		tokio::runtime::Handle::current(),
 	)
 	.await
 	{
@@ -347,7 +402,8 @@ async fn start_ldk() {
 	let mut channelmonitors = persister.read_channelmonitors(keys_manager.clone()).unwrap();
 
 	// Step 8: Initialize the ChannelManager
-	let user_config = UserConfig::default();
+	let mut user_config = UserConfig::default();
+	user_config.peer_channel_config_limits.force_announced_channel_preference = false;
 	let mut restarting_node = true;
 	let (channel_manager_blockhash, mut channel_manager) = {
 		if let Ok(mut f) = fs::File::open(format!("{}/manager", ldk_data_dir.clone())) {
@@ -435,22 +491,19 @@ async fn start_ldk() {
 	// Step 11: Optional: Initialize the NetGraphMsgHandler
 	let genesis = genesis_block(args.network).header.block_hash();
 	let network_graph_path = format!("{}/network_graph", ldk_data_dir.clone());
-	let network_graph = disk::read_network(Path::new(&network_graph_path), genesis);
-	let router = Arc::new(NetGraphMsgHandler::from_net_graph(
+	let network_graph = Arc::new(disk::read_network(Path::new(&network_graph_path), genesis));
+	let network_gossip = Arc::new(NetGraphMsgHandler::new(
+		Arc::clone(&network_graph),
 		None::<Arc<dyn chain::Access + Send + Sync>>,
 		logger.clone(),
-		network_graph,
 	));
-	let router_persist = Arc::clone(&router);
+	let network_graph_persist = Arc::clone(&network_graph);
 	tokio::spawn(async move {
 		let mut interval = tokio::time::interval(Duration::from_secs(600));
 		loop {
 			interval.tick().await;
-			if disk::persist_network(
-				Path::new(&network_graph_path),
-				&*router_persist.network_graph.read().unwrap(),
-			)
-			.is_err()
+			if disk::persist_network(Path::new(&network_graph_path), &network_graph_persist)
+				.is_err()
 			{
 				// Persistence errors here are non-fatal as we can just fetch the routing graph
 				// again later, but they may indicate a disk error which could be fatal elsewhere.
@@ -465,13 +518,16 @@ async fn start_ldk() {
 	let channel_manager: Arc<ChannelManager> = Arc::new(channel_manager);
 	let mut ephemeral_bytes = [0; 32];
 	rand::thread_rng().fill_bytes(&mut ephemeral_bytes);
-	let lightning_msg_handler =
-		MessageHandler { chan_handler: channel_manager.clone(), route_handler: router.clone() };
+	let lightning_msg_handler = MessageHandler {
+		chan_handler: channel_manager.clone(),
+		route_handler: network_gossip.clone(),
+	};
 	let peer_manager: Arc<PeerManager> = Arc::new(PeerManager::new(
 		lightning_msg_handler,
 		keys_manager.get_node_secret(),
 		&ephemeral_bytes,
 		logger.clone(),
+		Arc::new(IgnoringMessageHandler {}),
 	));
 
 	// ## Running LDK
@@ -528,7 +584,7 @@ async fn start_ldk() {
 	let network = args.network;
 	let bitcoind_rpc = bitcoind_client.clone();
 	let handle = tokio::runtime::Handle::current();
-	let event_handler = move |event| {
+	let event_handler = move |event: &Event| {
 		handle.block_on(handle_ldk_events(
 			channel_manager_event_listener.clone(),
 			bitcoind_rpc.clone(),
@@ -537,38 +593,87 @@ async fn start_ldk() {
 			outbound_pmts_for_events.clone(),
 			network,
 			event,
-		))
+		));
 	};
-	// Step 16: Persist ChannelManager
+
+	// Step 16: Initialize routing Scorer
+	let scorer_path = format!("{}/scorer", ldk_data_dir.clone());
+	let scorer = Arc::new(Mutex::new(disk::read_scorer(Path::new(&scorer_path))));
+	let scorer_persist = Arc::clone(&scorer);
+	tokio::spawn(async move {
+		let mut interval = tokio::time::interval(Duration::from_secs(600));
+		loop {
+			interval.tick().await;
+			if disk::persist_scorer(Path::new(&scorer_path), &scorer_persist.lock().unwrap())
+				.is_err()
+			{
+				// Persistence errors here are non-fatal as channels will be re-scored as payments
+				// fail, but they may indicate a disk error which could be fatal elsewhere.
+				eprintln!("Warning: Failed to persist scorer, check your disk and permissions");
+			}
+		}
+	});
+
+	// Step 17: Create InvoicePayer
+	let router = DefaultRouter::new(network_graph.clone(), logger.clone());
+	let invoice_payer = Arc::new(InvoicePayer::new(
+		channel_manager.clone(),
+		router,
+		scorer.clone(),
+		logger.clone(),
+		event_handler,
+		payment::RetryAttempts(5),
+	));
+
+	// Step 18: Persist ChannelManager
 	let data_dir = ldk_data_dir.clone();
 	let persist_channel_manager_callback =
 		move |node: &ChannelManager| FilesystemPersister::persist_manager(data_dir.clone(), &*node);
-	// Step 17: Background Processing
-	BackgroundProcessor::start(
+
+	// Step 19: Background Processing
+	let background_processor = BackgroundProcessor::start(
 		persist_channel_manager_callback,
-		event_handler,
+		invoice_payer.clone(),
 		chain_monitor.clone(),
 		channel_manager.clone(),
+		Some(network_gossip.clone()),
 		peer_manager.clone(),
 		logger.clone(),
 	);
 
-	// Reconnect to channel peers if possible.
+	// Regularly reconnect to channel peers.
+ let connect_cm = Arc::clone(&channel_manager); + let connect_pm = Arc::clone(&peer_manager); let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir.clone()); - match disk::read_channel_peer_data(Path::new(&peer_data_path)) { - Ok(mut info) => { - for (pubkey, peer_addr) in info.drain() { - for chan_info in channel_manager.list_channels() { - if pubkey == chan_info.counterparty.node_id { - let _ = - cli::connect_peer_if_necessary(pubkey, peer_addr, peer_manager.clone()) + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(1)); + loop { + interval.tick().await; + match disk::read_channel_peer_data(Path::new(&peer_data_path)) { + Ok(info) => { + let peers = connect_pm.get_peer_node_ids(); + for node_id in connect_cm + .list_channels() + .iter() + .map(|chan| chan.counterparty.node_id) + .filter(|id| !peers.contains(id)) + { + for (pubkey, peer_addr) in info.iter() { + if *pubkey == node_id { + let _ = cli::do_connect_peer( + *pubkey, + peer_addr.clone(), + Arc::clone(&connect_pm), + ) .await; + } + } } } + Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e), } } - Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e), - } + }); // Regularly broadcast our node_announcement. This is only required (or possible) if we have // some public channels, and is only useful if we have public listen address(es) to announce. @@ -592,10 +697,12 @@ async fn start_ldk() { // Start the CLI. cli::poll_for_user_input( + invoice_payer.clone(), peer_manager.clone(), channel_manager.clone(), keys_manager.clone(), - router.clone(), + network_graph.clone(), + scorer.clone(), inbound_payments, outbound_payments, ldk_data_dir.clone(), @@ -603,6 +710,9 @@ async fn start_ldk() { network, ) .await; + + // Stop the background processor. + background_processor.stop().unwrap(); } #[tokio::main]
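The patch only constructs the InvoicePayer and hands it to cli::poll_for_user_input; the sending side lives in cli.rs, which is outside this blobdiff. The sketch below is a hypothetical helper (pay_bolt11 is a made-up name, not code from this patch) showing roughly how a CLI command could drive the InvoicePayer alias added in main.rs above, assuming the Invoice parsing and PaymentError variants exposed by this version of lightning-invoice:

use std::sync::Arc;

use lightning::util::events::EventHandler;
use lightning_invoice::payment::PaymentError;
use lightning_invoice::Invoice;

// Hypothetical helper; `InvoicePayer` is the type alias defined in main.rs above.
fn pay_bolt11<E: EventHandler>(invoice_payer: Arc<InvoicePayer<E>>, invoice_str: &str) {
	// Parse the BOLT11 invoice string.
	let invoice = match invoice_str.parse::<Invoice>() {
		Ok(inv) => inv,
		Err(e) => {
			println!("ERROR: could not parse invoice: {:?}", e);
			return;
		}
	};
	// The InvoicePayer routes via the DefaultRouter and Scorer built in Steps 16 and 17,
	// retrying failed paths up to the RetryAttempts(5) configured above; progress comes
	// back through the PaymentSent/PaymentPathFailed arms of handle_ldk_events().
	match invoice_payer.pay_invoice(&invoice) {
		Ok(_) => println!("EVENT: initiated payment to {}", invoice.recover_payee_pub_key()),
		Err(PaymentError::Invoice(e)) => println!("ERROR: invalid invoice: {}", e),
		Err(PaymentError::Routing(e)) => println!("ERROR: failed to find route: {}", e.err),
		Err(PaymentError::Sending(e)) => println!("ERROR: failed to send payment: {:?}", e),
	}
}

Because retries and path failures are reported through the same event_handler closure the patch wires into the InvoicePayer, no extra retry logic is needed at the call site.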