X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;ds=sidebyside;f=src%2Fmain.rs;h=57f0bb2ea7f9017c42feb5b6ef7b9b8818bbda7d;hb=aa1635c16624664ba2fde72c0f58da7437e50b74;hp=f740cd1d604c32e1c379ebcc2a53106b99dc4533;hpb=1b034d63a4cb4e0b249ae6240166513b6be76737;p=ldk-sample diff --git a/src/main.rs b/src/main.rs index f740cd1..a33d667 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,424 +1,1095 @@ -mod bitcoind_client; +mod args; +pub mod bitcoind_client; mod cli; -mod utils; +mod convert; +mod disk; +mod hex_utils; +mod sweep; -use background_processor::BackgroundProcessor; -use bitcoin::{BlockHash, Txid}; -use bitcoin::blockdata::constants::genesis_block; +use crate::bitcoind_client::BitcoindClient; +use crate::disk::FilesystemLogger; use bitcoin::blockdata::transaction::Transaction; use bitcoin::consensus::encode; -use bitcoin::hashes::hex::FromHex; use bitcoin::network::constants::Network; -use bitcoin::secp256k1::Secp256k1; -use bitcoin::util::address::Address; +use bitcoin::BlockHash; use bitcoin_bech32::WitnessProgram; -use crate::bitcoind_client::BitcoindClient; -use lightning::chain; -use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; -use lightning::chain::chainmonitor::ChainMonitor; -use lightning::chain::channelmonitor::ChannelMonitor; -use lightning::chain::Filter; -use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager}; -use lightning::chain::transaction::OutPoint; -use lightning::chain::Watch; -use lightning::ln::channelmanager; -use lightning::ln::channelmanager::{ChannelManagerReadArgs, PaymentHash, PaymentPreimage, - SimpleArcChannelManager}; -use lightning::ln::peer_handler::{MessageHandler, SimpleArcPeerManager}; +use disk::{INBOUND_PAYMENTS_FNAME, OUTBOUND_PAYMENTS_FNAME}; +use lightning::chain::{chainmonitor, ChannelMonitorUpdateStatus}; +use lightning::chain::{Filter, Watch}; +use lightning::events::bump_transaction::{BumpTransactionEventHandler, Wallet}; +use lightning::events::{Event, PaymentFailureReason, PaymentPurpose}; +use lightning::ln::channelmanager::{self, RecentPaymentDetails}; +use lightning::ln::channelmanager::{ + ChainParameters, ChannelManagerReadArgs, SimpleArcChannelManager, +}; +use lightning::ln::msgs::DecodeError; +use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager}; +use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::onion_message::{DefaultMessageRouter, SimpleArcOnionMessenger}; +use lightning::routing::gossip; +use lightning::routing::gossip::{NodeId, P2PGossipSync}; +use lightning::routing::router::DefaultRouter; +use lightning::routing::scoring::ProbabilisticScoringFeeParameters; +use lightning::sign::{EntropySource, InMemorySigner, KeysManager, SpendableOutputDescriptor}; use lightning::util::config::UserConfig; -use lightning::util::events::{Event, EventsProvider}; -use lightning::util::logger::{Logger, Record}; -use lightning::util::ser::{ReadableArgs, Writer}; -use lightning_block_sync::UnboundedCache; -use lightning_block_sync::SpvClient; -use lightning_block_sync::http::HttpEndpoint; +use lightning::util::persist::{self, KVStore, MonitorUpdatingPersister}; +use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; +use lightning::{chain, impl_writeable_tlv_based, impl_writeable_tlv_based_enum}; +use lightning_background_processor::{process_events_async, GossipSync}; use lightning_block_sync::init; use lightning_block_sync::poll; -use 
lightning_block_sync::poll::{ChainTip, Poll}; -use lightning_block_sync::rpc::RpcClient; +use lightning_block_sync::SpvClient; +use lightning_block_sync::UnboundedCache; use lightning_net_tokio::SocketDescriptor; -use lightning_persister::FilesystemPersister; +use lightning_persister::fs_store::FilesystemStore; use rand::{thread_rng, Rng}; -use lightning::routing::network_graph::NetGraphMsgHandler; -use std::cell::RefCell; +use std::collections::hash_map::Entry; use std::collections::HashMap; +use std::convert::TryInto; +use std::fmt; use std::fs; use std::fs::File; -use std::io::Cursor; +use std::io; +use std::io::Write; use std::path::Path; -use std::str::FromStr; -use std::sync::{Arc, Mutex}; -use std::thread; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex, RwLock}; use std::time::{Duration, SystemTime}; -use time::OffsetDateTime; -use tokio::runtime::Runtime; -use tokio::sync::mpsc; - -const NETWORK: Network = Network::Regtest; - -pub struct FilesystemLogger{} -impl Logger for FilesystemLogger { - fn log(&self, record: &Record) { - let raw_log = record.args.to_string(); - let log = format!("{} {:<5} [{}:{}] {}", OffsetDateTime::now_utc().format("%F %T"), - record.level.to_string(), record.module_path, record.line, raw_log); - fs::create_dir_all("logs").unwrap(); - fs::OpenOptions::new().create(true).append(true).open("./logs/logs.txt").unwrap() - .write_all(log.as_bytes()).unwrap(); - } + +pub(crate) const PENDING_SPENDABLE_OUTPUT_DIR: &'static str = "pending_spendable_outputs"; + +#[derive(Copy, Clone)] +pub(crate) enum HTLCStatus { + Pending, + Succeeded, + Failed, +} + +impl_writeable_tlv_based_enum!(HTLCStatus, + (0, Pending) => {}, + (1, Succeeded) => {}, + (2, Failed) => {}; +); + +pub(crate) struct MillisatAmount(Option); + +impl fmt::Display for MillisatAmount { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.0 { + Some(amt) => write!(f, "{}", amt), + None => write!(f, "unknown"), + } + } +} + +impl Readable for MillisatAmount { + fn read(r: &mut R) -> Result { + let amt: Option = Readable::read(r)?; + Ok(MillisatAmount(amt)) + } +} + +impl Writeable for MillisatAmount { + fn write(&self, w: &mut W) -> Result<(), std::io::Error> { + self.0.write(w) + } } -fn read_channelmonitors_from_disk(path: String, keys_manager: Arc) -> - Result)>, std::io::Error> -{ - if !Path::new(&path).exists() { - return Ok(HashMap::new()) - } - let mut outpoint_to_channelmonitor = HashMap::new(); - for file_option in fs::read_dir(path).unwrap() { - let file = file_option.unwrap(); - let owned_file_name = file.file_name(); - let filename = owned_file_name.to_str(); - if !filename.is_some() || !filename.unwrap().is_ascii() || filename.unwrap().len() < 65 { - return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid ChannelMonitor file name")); - } - - let txid = Txid::from_hex(filename.unwrap().split_at(64).0); - if txid.is_err() { - return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid tx ID in filename")); - } - - let index = filename.unwrap().split_at(65).1.split('.').next().unwrap().parse(); - if index.is_err() { - return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid tx index in filename")); - } - - let contents = fs::read(&file.path())?; - - if let Ok((blockhash, channel_monitor)) = - <(BlockHash, ChannelMonitor)>::read(&mut Cursor::new(&contents), - &*keys_manager) - { - outpoint_to_channelmonitor.insert(OutPoint { txid: txid.unwrap(), index: index.unwrap() }, - (blockhash, channel_monitor)); - } else { - return 
Err(std::io::Error::new(std::io::ErrorKind::Other, - "Failed to deserialize ChannelMonitor")); - } - } - Ok(outpoint_to_channelmonitor) +pub(crate) struct PaymentInfo { + preimage: Option, + secret: Option, + status: HTLCStatus, + amt_msat: MillisatAmount, } -type Invoice = String; +impl_writeable_tlv_based!(PaymentInfo, { + (0, preimage, required), + (2, secret, required), + (4, status, required), + (6, amt_msat, required), +}); + +pub(crate) struct PaymentInfoStorage { + payments: HashMap, +} + +impl_writeable_tlv_based!(PaymentInfoStorage, { + (0, payments, required), +}); + +type ChainMonitor = chainmonitor::ChainMonitor< + InMemorySigner, + Arc, + Arc, + Arc, + Arc, + Arc< + MonitorUpdatingPersister< + Arc, + Arc, + Arc, + Arc, + >, + >, +>; -enum HTLCDirection { - Inbound, - Outbound +pub(crate) type GossipVerifier = lightning_block_sync::gossip::GossipVerifier< + lightning_block_sync::gossip::TokioSpawner, + Arc, + Arc, + SocketDescriptor, + Arc, + Arc, + IgnoringMessageHandler, + Arc, +>; + +pub(crate) type PeerManager = SimpleArcPeerManager< + SocketDescriptor, + ChainMonitor, + BitcoindClient, + BitcoindClient, + GossipVerifier, + FilesystemLogger, +>; + +pub(crate) type ChannelManager = + SimpleArcChannelManager; + +pub(crate) type NetworkGraph = gossip::NetworkGraph>; + +type OnionMessenger = + SimpleArcOnionMessenger; + +pub(crate) type BumpTxEventHandler = BumpTransactionEventHandler< + Arc, + Arc, Arc>>, + Arc, + Arc, +>; + +async fn handle_ldk_events( + channel_manager: &Arc, bitcoind_client: &BitcoindClient, + network_graph: &NetworkGraph, keys_manager: &KeysManager, + bump_tx_event_handler: &BumpTxEventHandler, inbound_payments: Arc>, + outbound_payments: Arc>, fs_store: &Arc, + network: Network, event: Event, +) { + match event { + Event::FundingGenerationReady { + temporary_channel_id, + counterparty_node_id, + channel_value_satoshis, + output_script, + .. + } => { + // Construct the raw transaction with one output, that is paid the amount of the + // channel. + let addr = WitnessProgram::from_scriptpubkey( + &output_script[..], + match network { + Network::Bitcoin => bitcoin_bech32::constants::Network::Bitcoin, + Network::Testnet => bitcoin_bech32::constants::Network::Testnet, + Network::Regtest => bitcoin_bech32::constants::Network::Regtest, + Network::Signet => bitcoin_bech32::constants::Network::Signet, + }, + ) + .expect("Lightning funding tx should always be to a SegWit output") + .to_address(); + let mut outputs = vec![HashMap::with_capacity(1)]; + outputs[0].insert(addr, channel_value_satoshis as f64 / 100_000_000.0); + let raw_tx = bitcoind_client.create_raw_transaction(outputs).await; + + // Have your wallet put the inputs into the transaction such that the output is + // satisfied. + let funded_tx = bitcoind_client.fund_raw_transaction(raw_tx).await; + + // Sign the final funding transaction and broadcast it. + let signed_tx = bitcoind_client.sign_raw_transaction_with_wallet(funded_tx.hex).await; + assert_eq!(signed_tx.complete, true); + let final_tx: Transaction = + encode::deserialize(&hex_utils::to_vec(&signed_tx.hex).unwrap()).unwrap(); + // Give the funding transaction back to LDK for opening the channel. + if channel_manager + .funding_transaction_generated( + &temporary_channel_id, + &counterparty_node_id, + final_tx, + ) + .is_err() + { + println!( + "\nERROR: Channel went away before we could fund it. 
The peer disconnected or refused the channel."); + print!("> "); + io::stdout().flush().unwrap(); + } + } + Event::PaymentClaimable { + payment_hash, + purpose, + amount_msat, + receiver_node_id: _, + via_channel_id: _, + via_user_channel_id: _, + claim_deadline: _, + onion_fields: _, + counterparty_skimmed_fee_msat: _, + } => { + println!( + "\nEVENT: received payment from payment hash {} of {} millisatoshis", + payment_hash, amount_msat, + ); + print!("> "); + io::stdout().flush().unwrap(); + let payment_preimage = match purpose { + PaymentPurpose::InvoicePayment { payment_preimage, .. } => payment_preimage, + PaymentPurpose::SpontaneousPayment(preimage) => Some(preimage), + }; + channel_manager.claim_funds(payment_preimage.unwrap()); + } + Event::PaymentClaimed { + payment_hash, + purpose, + amount_msat, + receiver_node_id: _, + htlcs: _, + sender_intended_total_msat: _, + } => { + println!( + "\nEVENT: claimed payment from payment hash {} of {} millisatoshis", + payment_hash, amount_msat, + ); + print!("> "); + io::stdout().flush().unwrap(); + let (payment_preimage, payment_secret) = match purpose { + PaymentPurpose::InvoicePayment { payment_preimage, payment_secret, .. } => { + (payment_preimage, Some(payment_secret)) + } + PaymentPurpose::SpontaneousPayment(preimage) => (Some(preimage), None), + }; + let mut inbound = inbound_payments.lock().unwrap(); + match inbound.payments.entry(payment_hash) { + Entry::Occupied(mut e) => { + let payment = e.get_mut(); + payment.status = HTLCStatus::Succeeded; + payment.preimage = payment_preimage; + payment.secret = payment_secret; + } + Entry::Vacant(e) => { + e.insert(PaymentInfo { + preimage: payment_preimage, + secret: payment_secret, + status: HTLCStatus::Succeeded, + amt_msat: MillisatAmount(Some(amount_msat)), + }); + } + } + fs_store.write("", "", INBOUND_PAYMENTS_FNAME, &inbound.encode()).unwrap(); + } + Event::PaymentSent { payment_preimage, payment_hash, fee_paid_msat, .. } => { + let mut outbound = outbound_payments.lock().unwrap(); + for (hash, payment) in outbound.payments.iter_mut() { + if *hash == payment_hash { + payment.preimage = Some(payment_preimage); + payment.status = HTLCStatus::Succeeded; + println!( + "\nEVENT: successfully sent payment of {} millisatoshis{} from \ + payment hash {} with preimage {}", + payment.amt_msat, + if let Some(fee) = fee_paid_msat { + format!(" (fee {} msat)", fee) + } else { + "".to_string() + }, + payment_hash, + payment_preimage + ); + print!("> "); + io::stdout().flush().unwrap(); + } + } + fs_store.write("", "", OUTBOUND_PAYMENTS_FNAME, &outbound.encode()).unwrap(); + } + Event::OpenChannelRequest { + ref temporary_channel_id, ref counterparty_node_id, .. + } => { + let mut random_bytes = [0u8; 16]; + random_bytes.copy_from_slice(&keys_manager.get_secure_random_bytes()[..16]); + let user_channel_id = u128::from_be_bytes(random_bytes); + let res = channel_manager.accept_inbound_channel( + temporary_channel_id, + counterparty_node_id, + user_channel_id, + ); + + if let Err(e) = res { + print!( + "\nEVENT: Failed to accept inbound channel ({}) from {}: {:?}", + temporary_channel_id, + hex_utils::hex_str(&counterparty_node_id.serialize()), + e, + ); + } else { + print!( + "\nEVENT: Accepted inbound channel ({}) from {}", + temporary_channel_id, + hex_utils::hex_str(&counterparty_node_id.serialize()), + ); + } + print!("> "); + io::stdout().flush().unwrap(); + } + Event::PaymentPathSuccessful { .. } => {} + Event::PaymentPathFailed { .. } => {} + Event::ProbeSuccessful { .. 
} => {} + Event::ProbeFailed { .. } => {} + Event::PaymentFailed { payment_hash, reason, .. } => { + print!( + "\nEVENT: Failed to send payment to payment hash {}: {:?}", + payment_hash, + if let Some(r) = reason { r } else { PaymentFailureReason::RetriesExhausted } + ); + print!("> "); + io::stdout().flush().unwrap(); + + let mut outbound = outbound_payments.lock().unwrap(); + if outbound.payments.contains_key(&payment_hash) { + let payment = outbound.payments.get_mut(&payment_hash).unwrap(); + payment.status = HTLCStatus::Failed; + } + fs_store.write("", "", OUTBOUND_PAYMENTS_FNAME, &outbound.encode()).unwrap(); + } + Event::InvoiceRequestFailed { payment_id } => { + print!("\nEVENT: Failed to request invoice to send payment with id {}", payment_id); + print!("> "); + io::stdout().flush().unwrap(); + + // TODO: mark the payment as failed + } + Event::PaymentForwarded { + prev_channel_id, + next_channel_id, + fee_earned_msat, + claim_from_onchain_tx, + outbound_amount_forwarded_msat, + } => { + let read_only_network_graph = network_graph.read_only(); + let nodes = read_only_network_graph.nodes(); + let channels = channel_manager.list_channels(); + + let node_str = |channel_id: &Option| match channel_id { + None => String::new(), + Some(channel_id) => match channels.iter().find(|c| c.channel_id == *channel_id) { + None => String::new(), + Some(channel) => { + match nodes.get(&NodeId::from_pubkey(&channel.counterparty.node_id)) { + None => "private node".to_string(), + Some(node) => match &node.announcement_info { + None => "unnamed node".to_string(), + Some(announcement) => { + format!("node {}", announcement.alias) + } + }, + } + } + }, + }; + let channel_str = |channel_id: &Option| { + channel_id + .map(|channel_id| format!(" with channel {}", channel_id)) + .unwrap_or_default() + }; + let from_prev_str = + format!(" from {}{}", node_str(&prev_channel_id), channel_str(&prev_channel_id)); + let to_next_str = + format!(" to {}{}", node_str(&next_channel_id), channel_str(&next_channel_id)); + + let from_onchain_str = if claim_from_onchain_tx { + "from onchain downstream claim" + } else { + "from HTLC fulfill message" + }; + let amt_args = if let Some(v) = outbound_amount_forwarded_msat { + format!("{}", v) + } else { + "?".to_string() + }; + if let Some(fee_earned) = fee_earned_msat { + println!( + "\nEVENT: Forwarded payment for {} msat{}{}, earning {} msat {}", + amt_args, from_prev_str, to_next_str, fee_earned, from_onchain_str + ); + } else { + println!( + "\nEVENT: Forwarded payment for {} msat{}{}, claiming onchain {}", + amt_args, from_prev_str, to_next_str, from_onchain_str + ); + } + print!("> "); + io::stdout().flush().unwrap(); + } + Event::HTLCHandlingFailed { .. } => {} + Event::PendingHTLCsForwardable { time_forwardable } => { + let forwarding_channel_manager = channel_manager.clone(); + let min = time_forwardable.as_millis() as u64; + tokio::spawn(async move { + let millis_to_sleep = thread_rng().gen_range(min, min * 5) as u64; + tokio::time::sleep(Duration::from_millis(millis_to_sleep)).await; + forwarding_channel_manager.process_pending_htlc_forwards(); + }); + } + Event::SpendableOutputs { outputs, channel_id: _ } => { + // SpendableOutputDescriptors, of which outputs is a vec of, are critical to keep track + // of! While a `StaticOutput` descriptor is just an output to a static, well-known key, + // other descriptors are not currently ever regenerated for you by LDK. Once we return + // from this method, the descriptor will be gone, and you may lose track of some funds. 
+ // + // Here we simply persist them to disk, with a background task running which will try + // to spend them regularly (possibly duplicatively/RBF'ing them). These can just be + // treated as normal funds where possible - they are only spendable by us and there is + // no rush to claim them. + for output in outputs { + let key = hex_utils::hex_str(&keys_manager.get_secure_random_bytes()); + // Note that if the type here changes our read code needs to change as well. + let output: SpendableOutputDescriptor = output; + fs_store.write(PENDING_SPENDABLE_OUTPUT_DIR, "", &key, &output.encode()).unwrap(); + } + } + Event::ChannelPending { channel_id, counterparty_node_id, .. } => { + println!( + "\nEVENT: Channel {} with peer {} is pending awaiting funding lock-in!", + channel_id, + hex_utils::hex_str(&counterparty_node_id.serialize()), + ); + print!("> "); + io::stdout().flush().unwrap(); + } + Event::ChannelReady { + ref channel_id, + user_channel_id: _, + ref counterparty_node_id, + channel_type: _, + } => { + println!( + "\nEVENT: Channel {} with peer {} is ready to be used!", + channel_id, + hex_utils::hex_str(&counterparty_node_id.serialize()), + ); + print!("> "); + io::stdout().flush().unwrap(); + } + Event::ChannelClosed { + channel_id, + reason, + user_channel_id: _, + counterparty_node_id, + channel_capacity_sats: _, + } => { + println!( + "\nEVENT: Channel {} with counterparty {} closed due to: {:?}", + channel_id, + counterparty_node_id.map(|id| format!("{}", id)).unwrap_or("".to_owned()), + reason + ); + print!("> "); + io::stdout().flush().unwrap(); + } + Event::DiscardFunding { .. } => { + // A "real" node should probably "lock" the UTXOs spent in funding transactions until + // the funding transaction either confirms, or this event is generated. + } + Event::HTLCIntercepted { .. } => {} + Event::BumpTransaction(event) => bump_tx_event_handler.handle_event(&event), + } } -type PaymentInfoStorage = Arc, HTLCDirection)>>>; - -type ArcChainMonitor = ChainMonitor, Arc, -Arc, Arc, Arc>; - -pub(crate) type PeerManager = SimpleArcPeerManager; - -pub(crate) type ChannelManager = SimpleArcChannelManager; - - -fn handle_ldk_events(peer_manager: Arc, channel_manager: Arc, - chain_monitor: Arc, bitcoind_rpc_client: Arc, - keys_manager: Arc, mut pending_txs: HashMap, - htlcs: PaymentInfoStorage) -> HashMap -{ - peer_manager.process_events(); - let mut check_for_more_events = true; - while check_for_more_events { - let loop_channel_manager = channel_manager.clone(); - check_for_more_events = false; - let mut events = channel_manager.get_and_clear_pending_events(); - events.append(&mut chain_monitor.get_and_clear_pending_events()); - let mut rpc = bitcoind_rpc_client.bitcoind_rpc_client.lock().unwrap(); - for event in events { - match event { - Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, - output_script, .. 
} => { - let addr = WitnessProgram::from_scriptpubkey(&output_script[..], match NETWORK { - Network::Bitcoin => bitcoin_bech32::constants::Network::Bitcoin, - Network::Testnet => bitcoin_bech32::constants::Network::Testnet, - Network::Regtest => bitcoin_bech32::constants::Network::Regtest, - } - ).expect("Lightning funding tx should always be to a SegWit output").to_address(); - let outputs = format!("{{\"{}\": {}}}", addr, channel_value_satoshis as f64 / 1_000_000_00.0).to_string(); - let tx_hex = rpc.call_method("createrawtransaction", &vec![serde_json::json!(outputs)]).unwrap(); - let raw_tx = format!("\"{}\"", tx_hex.as_str().unwrap()).to_string(); - let funded_tx = rpc.call_method("fundrawtransaction", &vec![serde_json::json!(raw_tx)]).unwrap(); - let change_output_position = funded_tx["changepos"].as_i64().unwrap(); - assert!(change_output_position == 0 || change_output_position == 1); - let funded_tx = format!("\"{}\"", funded_tx["hex"].as_str().unwrap()).to_string(); - let signed_tx = rpc.call_method("signrawtransactionwithwallet", - &vec![serde_json::json!(funded_tx)]).unwrap(); - assert_eq!(signed_tx["complete"].as_bool().unwrap(), true); - let final_tx: Transaction = encode::deserialize(&utils::hex_to_vec(&signed_tx["hex"].as_str().unwrap()).unwrap()).unwrap(); - let outpoint = OutPoint { - txid: final_tx.txid(), - index: if change_output_position == 0 { 1 } else { 0 } - }; - loop_channel_manager.funding_transaction_generated(&temporary_channel_id, outpoint); - pending_txs.insert(outpoint, final_tx); - check_for_more_events = true; - }, - Event::FundingBroadcastSafe { funding_txo, .. } => { - let funding_tx = pending_txs.remove(&funding_txo).unwrap(); - bitcoind_rpc_client.broadcast_transaction(&funding_tx); - }, - Event::PaymentReceived { payment_hash, payment_secret, amt: amt_msat } => { - let payment_info = htlcs.lock().unwrap(); - if let Some(htlc_info) = payment_info.get(&payment_hash) { - assert!(loop_channel_manager.claim_funds(htlc_info.1.unwrap().clone(), - &payment_secret, amt_msat)); - } else { - loop_channel_manager.fail_htlc_backwards(&payment_hash, &payment_secret); - } - check_for_more_events = true; - }, - Event::PaymentSent { payment_preimage } => { - let payment_info = htlcs.lock().unwrap(); - for (invoice, preimage_option, _) in payment_info.values() { - if let Some(preimage) = preimage_option { - if payment_preimage == *preimage { - println!("NEW EVENT: successfully sent payment from invoice {} with preimage {}", - invoice, utils::hex_str(&payment_preimage.0)); - } - } - } - }, - Event::PaymentFailed { payment_hash, rejected_by_dest } => { - let payment_info = htlcs.lock().unwrap(); - let htlc_info = payment_info.get(&payment_hash).unwrap(); - print!("NEW EVENT: Failed to send payment to invoice {}:", htlc_info.0); - if rejected_by_dest { - println!("rejected by destination node"); - } else { - println!("route failed"); - } - }, - Event::PendingHTLCsForwardable { .. 
} => { - loop_channel_manager.process_pending_htlc_forwards(); - check_for_more_events = true; - }, - Event::SpendableOutputs { outputs } => { - let addr_args = vec![serde_json::json!("LDK output address")]; - let destination_address_str = rpc.call_method("getnewaddress", &addr_args).unwrap(); - let destination_address = Address::from_str(destination_address_str.as_str().unwrap()).unwrap(); - let output_descriptors = &outputs.iter().map(|a| a).collect::>(); - let tx_feerate = bitcoind_rpc_client.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); - let spending_tx = keys_manager.spend_spendable_outputs(output_descriptors, - Vec::new(), - destination_address.script_pubkey(), - tx_feerate, &Secp256k1::new()).unwrap(); - bitcoind_rpc_client.broadcast_transaction(&spending_tx); - // XXX maybe need to rescan and blah? but contrary to what matt's saying, it - // looks like spend_spendable's got us covered - } - } - } - } - pending_txs +async fn start_ldk() { + let args = match args::parse_startup_args() { + Ok(user_args) => user_args, + Err(()) => return, + }; + + // Initialize the LDK data directory if necessary. + let ldk_data_dir = format!("{}/.ldk", args.ldk_storage_dir_path); + fs::create_dir_all(ldk_data_dir.clone()).unwrap(); + + // ## Setup + // Step 1: Initialize the Logger + let logger = Arc::new(FilesystemLogger::new(ldk_data_dir.clone())); + + // Initialize our bitcoind client. + let bitcoind_client = match BitcoindClient::new( + args.bitcoind_rpc_host.clone(), + args.bitcoind_rpc_port, + args.bitcoind_rpc_username.clone(), + args.bitcoind_rpc_password.clone(), + tokio::runtime::Handle::current(), + Arc::clone(&logger), + ) + .await + { + Ok(client) => Arc::new(client), + Err(e) => { + println!("Failed to connect to bitcoind client: {}", e); + return; + } + }; + + // Check that the bitcoind we've connected to is running the network we expect + let bitcoind_chain = bitcoind_client.get_blockchain_info().await.chain; + if bitcoind_chain + != match args.network { + bitcoin::Network::Bitcoin => "main", + bitcoin::Network::Testnet => "test", + bitcoin::Network::Regtest => "regtest", + bitcoin::Network::Signet => "signet", + } { + println!( + "Chain argument ({}) didn't match bitcoind chain ({})", + args.network, bitcoind_chain + ); + return; + } + + // Step 2: Initialize the FeeEstimator + + // BitcoindClient implements the FeeEstimator trait, so it'll act as our fee estimator. + let fee_estimator = bitcoind_client.clone(); + + // Step 3: Initialize the BroadcasterInterface + + // BitcoindClient implements the BroadcasterInterface trait, so it'll act as our transaction + // broadcaster. + let broadcaster = bitcoind_client.clone(); + + // Step 4: Initialize the KeysManager + + // The key seed that we use to derive the node privkey (that corresponds to the node pubkey) and + // other secret key material. 
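+	// The 32-byte seed is read back from disk if it exists, otherwise generated and persisted
+	// below. `KeysManager::new` (a few lines down) derives the node secret and channel keys
+	// from this seed; the current-time arguments just need to be unique across restarts so
+	// that freshly derived channel keys never repeat.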
+ let keys_seed_path = format!("{}/keys_seed", ldk_data_dir.clone()); + let keys_seed = if let Ok(seed) = fs::read(keys_seed_path.clone()) { + assert_eq!(seed.len(), 32); + let mut key = [0; 32]; + key.copy_from_slice(&seed); + key + } else { + let mut key = [0; 32]; + thread_rng().fill_bytes(&mut key); + match File::create(keys_seed_path.clone()) { + Ok(mut f) => { + Write::write_all(&mut f, &key).expect("Failed to write node keys seed to disk"); + f.sync_all().expect("Failed to sync node keys seed to disk"); + } + Err(e) => { + println!("ERROR: Unable to create keys seed file {}: {}", keys_seed_path, e); + return; + } + } + key + }; + let cur = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); + let keys_manager = Arc::new(KeysManager::new(&keys_seed, cur.as_secs(), cur.subsec_nanos())); + + let bump_tx_event_handler = Arc::new(BumpTransactionEventHandler::new( + Arc::clone(&broadcaster), + Arc::new(Wallet::new(Arc::clone(&bitcoind_client), Arc::clone(&logger))), + Arc::clone(&keys_manager), + Arc::clone(&logger), + )); + + // Step 5: Initialize Persistence + let fs_store = Arc::new(FilesystemStore::new(ldk_data_dir.clone().into())); + let persister = Arc::new(MonitorUpdatingPersister::new( + Arc::clone(&fs_store), + Arc::clone(&logger), + 1000, + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + )); + // Alternatively, you can use the `FilesystemStore` as a `Persist` directly, at the cost of + // larger `ChannelMonitor` update writes (but no deletion or cleanup): + //let persister = Arc::clone(&fs_store); + + // Step 6: Initialize the ChainMonitor + let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( + None, + Arc::clone(&broadcaster), + Arc::clone(&logger), + Arc::clone(&fee_estimator), + Arc::clone(&persister), + )); + + // Step 7: Read ChannelMonitor state from disk + let mut channelmonitors = persister + .read_all_channel_monitors_with_updates(&bitcoind_client, &bitcoind_client) + .unwrap(); + // If you are using the `FilesystemStore` as a `Persist` directly, use + // `lightning::util::persist::read_channel_monitors` like this: + //read_channel_monitors(Arc::clone(&persister), Arc::clone(&keys_manager), Arc::clone(&keys_manager)).unwrap(); + + // Step 8: Poll for the best chain tip, which may be used by the channel manager & spv client + let polled_chain_tip = init::validate_best_block_header(bitcoind_client.as_ref()) + .await + .expect("Failed to fetch best block header and best block"); + + // Step 9: Initialize routing ProbabilisticScorer + let network_graph_path = format!("{}/network_graph", ldk_data_dir.clone()); + let network_graph = + Arc::new(disk::read_network(Path::new(&network_graph_path), args.network, logger.clone())); + + let scorer_path = format!("{}/scorer", ldk_data_dir.clone()); + let scorer = Arc::new(RwLock::new(disk::read_scorer( + Path::new(&scorer_path), + Arc::clone(&network_graph), + Arc::clone(&logger), + ))); + + // Step 10: Create Router + let scoring_fee_params = ProbabilisticScoringFeeParameters::default(); + let router = Arc::new(DefaultRouter::new( + network_graph.clone(), + logger.clone(), + keys_manager.get_secure_random_bytes(), + scorer.clone(), + scoring_fee_params, + )); + + // Step 11: Initialize the ChannelManager + let mut user_config = UserConfig::default(); + user_config.channel_handshake_limits.force_announced_channel_preference = false; + user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + user_config.manually_accept_inbound_channels = true; + let mut restarting_node = 
true; + let (channel_manager_blockhash, channel_manager) = { + if let Ok(mut f) = fs::File::open(format!("{}/manager", ldk_data_dir.clone())) { + let mut channel_monitor_mut_references = Vec::new(); + for (_, channel_monitor) in channelmonitors.iter_mut() { + channel_monitor_mut_references.push(channel_monitor); + } + let read_args = ChannelManagerReadArgs::new( + keys_manager.clone(), + keys_manager.clone(), + keys_manager.clone(), + fee_estimator.clone(), + chain_monitor.clone(), + broadcaster.clone(), + router, + logger.clone(), + user_config, + channel_monitor_mut_references, + ); + <(BlockHash, ChannelManager)>::read(&mut f, read_args).unwrap() + } else { + // We're starting a fresh node. + restarting_node = false; + + let polled_best_block = polled_chain_tip.to_best_block(); + let polled_best_block_hash = polled_best_block.block_hash(); + let chain_params = + ChainParameters { network: args.network, best_block: polled_best_block }; + let fresh_channel_manager = channelmanager::ChannelManager::new( + fee_estimator.clone(), + chain_monitor.clone(), + broadcaster.clone(), + router, + logger.clone(), + keys_manager.clone(), + keys_manager.clone(), + keys_manager.clone(), + user_config, + chain_params, + cur.as_secs() as u32, + ); + (polled_best_block_hash, fresh_channel_manager) + } + }; + + // Step 12: Sync ChannelMonitors and ChannelManager to chain tip + let mut chain_listener_channel_monitors = Vec::new(); + let mut cache = UnboundedCache::new(); + let chain_tip = if restarting_node { + let mut chain_listeners = vec![( + channel_manager_blockhash, + &channel_manager as &(dyn chain::Listen + Send + Sync), + )]; + + for (blockhash, channel_monitor) in channelmonitors.drain(..) { + let outpoint = channel_monitor.get_funding_txo().0; + chain_listener_channel_monitors.push(( + blockhash, + (channel_monitor, broadcaster.clone(), fee_estimator.clone(), logger.clone()), + outpoint, + )); + } + + for monitor_listener_info in chain_listener_channel_monitors.iter_mut() { + chain_listeners.push(( + monitor_listener_info.0, + &monitor_listener_info.1 as &(dyn chain::Listen + Send + Sync), + )); + } + + init::synchronize_listeners( + bitcoind_client.as_ref(), + args.network, + &mut cache, + chain_listeners, + ) + .await + .unwrap() + } else { + polled_chain_tip + }; + + // Step 13: Give ChannelMonitors to ChainMonitor + for item in chain_listener_channel_monitors.drain(..) 
{ + let channel_monitor = item.1 .0; + let funding_outpoint = item.2; + assert_eq!( + chain_monitor.watch_channel(funding_outpoint, channel_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); + } + + // Step 14: Optional: Initialize the P2PGossipSync + let gossip_sync = + Arc::new(P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger))); + + // Step 15: Initialize the PeerManager + let channel_manager: Arc = Arc::new(channel_manager); + let onion_messenger: Arc = Arc::new(OnionMessenger::new( + Arc::clone(&keys_manager), + Arc::clone(&keys_manager), + Arc::clone(&logger), + Arc::new(DefaultMessageRouter {}), + Arc::clone(&channel_manager), + IgnoringMessageHandler {}, + )); + let mut ephemeral_bytes = [0; 32]; + let current_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); + rand::thread_rng().fill_bytes(&mut ephemeral_bytes); + let lightning_msg_handler = MessageHandler { + chan_handler: channel_manager.clone(), + route_handler: gossip_sync.clone(), + onion_message_handler: onion_messenger.clone(), + custom_message_handler: IgnoringMessageHandler {}, + }; + let peer_manager: Arc = Arc::new(PeerManager::new( + lightning_msg_handler, + current_time.try_into().unwrap(), + &ephemeral_bytes, + logger.clone(), + Arc::clone(&keys_manager), + )); + + // Install a GossipVerifier in in the P2PGossipSync + let utxo_lookup = GossipVerifier::new( + Arc::clone(&bitcoind_client.bitcoind_rpc_client), + lightning_block_sync::gossip::TokioSpawner, + Arc::clone(&gossip_sync), + Arc::clone(&peer_manager), + ); + gossip_sync.add_utxo_lookup(Some(utxo_lookup)); + + // ## Running LDK + // Step 16: Initialize networking + + let peer_manager_connection_handler = peer_manager.clone(); + let listening_port = args.ldk_peer_listening_port; + let stop_listen_connect = Arc::new(AtomicBool::new(false)); + let stop_listen = Arc::clone(&stop_listen_connect); + tokio::spawn(async move { + let listener = tokio::net::TcpListener::bind(format!("[::]:{}", listening_port)) + .await + .expect("Failed to bind to listen port - is something else already listening on it?"); + loop { + let peer_mgr = peer_manager_connection_handler.clone(); + let tcp_stream = listener.accept().await.unwrap().0; + if stop_listen.load(Ordering::Acquire) { + return; + } + tokio::spawn(async move { + lightning_net_tokio::setup_inbound( + peer_mgr.clone(), + tcp_stream.into_std().unwrap(), + ) + .await; + }); + } + }); + + // Step 17: Connect and Disconnect Blocks + let channel_manager_listener = channel_manager.clone(); + let chain_monitor_listener = chain_monitor.clone(); + let bitcoind_block_source = bitcoind_client.clone(); + let network = args.network; + tokio::spawn(async move { + let chain_poller = poll::ChainPoller::new(bitcoind_block_source.as_ref(), network); + let chain_listener = (chain_monitor_listener, channel_manager_listener); + let mut spv_client = SpvClient::new(chain_tip, chain_poller, &mut cache, &chain_listener); + loop { + spv_client.poll_best_tip().await.unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + } + }); + + let inbound_payments = Arc::new(Mutex::new(disk::read_payment_info(Path::new(&format!( + "{}/{}", + ldk_data_dir, INBOUND_PAYMENTS_FNAME + ))))); + let outbound_payments = Arc::new(Mutex::new(disk::read_payment_info(Path::new(&format!( + "{}/{}", + ldk_data_dir, OUTBOUND_PAYMENTS_FNAME + ))))); + let recent_payments_payment_hashes = channel_manager + .list_recent_payments() + .into_iter() + .filter_map(|p| match p { + RecentPaymentDetails::Pending { 
payment_hash, .. } => Some(payment_hash), + RecentPaymentDetails::Fulfilled { payment_hash, .. } => payment_hash, + RecentPaymentDetails::Abandoned { payment_hash, .. } => Some(payment_hash), + RecentPaymentDetails::AwaitingInvoice { payment_id: _ } => todo!(), + }) + .collect::>(); + for (payment_hash, payment_info) in outbound_payments + .lock() + .unwrap() + .payments + .iter_mut() + .filter(|(_, i)| matches!(i.status, HTLCStatus::Pending)) + { + if !recent_payments_payment_hashes.contains(payment_hash) { + payment_info.status = HTLCStatus::Failed; + } + } + fs_store + .write("", "", OUTBOUND_PAYMENTS_FNAME, &outbound_payments.lock().unwrap().encode()) + .unwrap(); + + // Step 18: Handle LDK Events + let channel_manager_event_listener = Arc::clone(&channel_manager); + let bitcoind_client_event_listener = Arc::clone(&bitcoind_client); + let network_graph_event_listener = Arc::clone(&network_graph); + let keys_manager_event_listener = Arc::clone(&keys_manager); + let inbound_payments_event_listener = Arc::clone(&inbound_payments); + let outbound_payments_event_listener = Arc::clone(&outbound_payments); + let fs_store_event_listener = Arc::clone(&fs_store); + let network = args.network; + let event_handler = move |event: Event| { + let channel_manager_event_listener = Arc::clone(&channel_manager_event_listener); + let bitcoind_client_event_listener = Arc::clone(&bitcoind_client_event_listener); + let network_graph_event_listener = Arc::clone(&network_graph_event_listener); + let keys_manager_event_listener = Arc::clone(&keys_manager_event_listener); + let bump_tx_event_handler = Arc::clone(&bump_tx_event_handler); + let inbound_payments_event_listener = Arc::clone(&inbound_payments_event_listener); + let outbound_payments_event_listener = Arc::clone(&outbound_payments_event_listener); + let fs_store_event_listener = Arc::clone(&fs_store_event_listener); + async move { + handle_ldk_events( + &channel_manager_event_listener, + &bitcoind_client_event_listener, + &network_graph_event_listener, + &keys_manager_event_listener, + &bump_tx_event_handler, + inbound_payments_event_listener, + outbound_payments_event_listener, + &fs_store_event_listener, + network, + event, + ) + .await; + } + }; + + // Step 19: Persist ChannelManager and NetworkGraph + let persister = Arc::new(FilesystemStore::new(ldk_data_dir.clone().into())); + + // Step 20: Background Processing + let (bp_exit, bp_exit_check) = tokio::sync::watch::channel(()); + let mut background_processor = tokio::spawn(process_events_async( + Arc::clone(&persister), + event_handler, + chain_monitor.clone(), + channel_manager.clone(), + GossipSync::p2p(gossip_sync.clone()), + peer_manager.clone(), + logger.clone(), + Some(scorer.clone()), + move |t| { + let mut bp_exit_fut_check = bp_exit_check.clone(); + Box::pin(async move { + tokio::select! { + _ = tokio::time::sleep(t) => false, + _ = bp_exit_fut_check.changed() => true, + } + }) + }, + false, + )); + + // Regularly reconnect to channel peers. 
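+	// The task below polls once per second: it compares the counterparties of our current
+	// channels (from `list_channels`) against the peers we are actually connected to, and
+	// re-dials any missing peer using the address stored in `channel_peer_data` on disk.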
+ let connect_cm = Arc::clone(&channel_manager); + let connect_pm = Arc::clone(&peer_manager); + let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir); + let stop_connect = Arc::clone(&stop_listen_connect); + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(1)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + loop { + interval.tick().await; + match disk::read_channel_peer_data(Path::new(&peer_data_path)) { + Ok(info) => { + let peers = connect_pm.get_peer_node_ids(); + for node_id in connect_cm + .list_channels() + .iter() + .map(|chan| chan.counterparty.node_id) + .filter(|id| !peers.iter().any(|(pk, _)| id == pk)) + { + if stop_connect.load(Ordering::Acquire) { + return; + } + for (pubkey, peer_addr) in info.iter() { + if *pubkey == node_id { + let _ = cli::do_connect_peer( + *pubkey, + peer_addr.clone(), + Arc::clone(&connect_pm), + ) + .await; + } + } + } + } + Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e), + } + } + }); + + // Regularly broadcast our node_announcement. This is only required (or possible) if we have + // some public channels. + let peer_man = Arc::clone(&peer_manager); + let chan_man = Arc::clone(&channel_manager); + let network = args.network; + tokio::spawn(async move { + // First wait a minute until we have some peers and maybe have opened a channel. + tokio::time::sleep(Duration::from_secs(60)).await; + // Then, update our announcement once an hour to keep it fresh but avoid unnecessary churn + // in the global gossip network. + let mut interval = tokio::time::interval(Duration::from_secs(3600)); + loop { + interval.tick().await; + // Don't bother trying to announce if we don't have any public channls, though our + // peers should drop such an announcement anyway. Note that announcement may not + // propagate until we have a channel with 6+ confirmations. + if chan_man.list_channels().iter().any(|chan| chan.is_public) { + peer_man.broadcast_node_announcement( + [0; 3], + args.ldk_announced_node_name, + args.ldk_announced_listen_addr.clone(), + ); + } + } + }); + + tokio::spawn(sweep::periodic_sweep( + ldk_data_dir.clone(), + Arc::clone(&keys_manager), + Arc::clone(&logger), + Arc::clone(&persister), + Arc::clone(&bitcoind_client), + Arc::clone(&channel_manager), + )); + + // Start the CLI. + let cli_channel_manager = Arc::clone(&channel_manager); + let cli_persister = Arc::clone(&persister); + let cli_logger = Arc::clone(&logger); + let cli_peer_manager = Arc::clone(&peer_manager); + let cli_poll = tokio::task::spawn_blocking(move || { + cli::poll_for_user_input( + cli_peer_manager, + cli_channel_manager, + keys_manager, + network_graph, + onion_messenger, + inbound_payments, + outbound_payments, + ldk_data_dir, + network, + cli_logger, + cli_persister, + ) + }); + + // Exit if either CLI polling exits or the background processor exits (which shouldn't happen + // unless we fail to write to the filesystem). + let mut bg_res = Ok(Ok(())); + tokio::select! { + _ = cli_poll => {}, + bg_exit = &mut background_processor => { + bg_res = bg_exit; + }, + } + + // Disconnect our peers and stop accepting new connections. This ensures we don't continue + // updating our channel data after we've stopped the background processor. 
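+	// Both the inbound-listen task and the reconnect task above watch this flag and return
+	// once they observe it set.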
+ stop_listen_connect.store(true, Ordering::Release); + peer_manager.disconnect_all_peers(); + + if let Err(e) = bg_res { + let persist_res = persister + .write( + persist::CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + persist::CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + persist::CHANNEL_MANAGER_PERSISTENCE_KEY, + &channel_manager.encode(), + ) + .unwrap(); + use lightning::util::logger::Logger; + lightning::log_error!( + &*logger, + "Last-ditch ChannelManager persistence result: {:?}", + persist_res + ); + panic!( + "ERR: background processing stopped with result {:?}, exiting.\n\ + Last-ditch ChannelManager persistence result {:?}", + e, persist_res + ); + } + + // Stop the background processor. + if !bp_exit.is_closed() { + bp_exit.send(()).unwrap(); + background_processor.await.unwrap().unwrap(); + } } -fn main() { - let bitcoind_host = "127.0.0.1".to_string(); - let bitcoind_port = 18443; - let rpc_user = "polaruser".to_string(); - let rpc_password = "polarpass".to_string(); - let bitcoind_client = Arc::new(BitcoindClient::new(bitcoind_host.clone(), bitcoind_port, None, - rpc_user.clone(), rpc_password.clone()).unwrap()); - - // ## Setup - // Step 1: Initialize the FeeEstimator - let fee_estimator = bitcoind_client.clone(); - - // Step 2: Initialize the Logger - let logger = Arc::new(FilesystemLogger{}); - - // Step 3: Initialize the BroadcasterInterface - let broadcaster = bitcoind_client.clone(); - - // Step 4: Initialize Persist - let persister = Arc::new(FilesystemPersister::new(".".to_string())); - - // Step 5: Initialize the ChainMonitor - let chain_monitor: Arc = Arc::new(ChainMonitor::new(None, broadcaster.clone(), - logger.clone(), fee_estimator.clone(), - persister.clone())); - - // Step 6: Initialize the KeysManager - let node_privkey = if let Ok(seed) = fs::read("./key_seed") { // the private key that corresponds - assert_eq!(seed.len(), 32); // to our lightning node's pubkey - let mut key = [0; 32]; - key.copy_from_slice(&seed); - key - } else { - let mut key = [0; 32]; - thread_rng().fill_bytes(&mut key); - let mut f = File::create("./key_seed").unwrap(); - f.write_all(&key).expect("Failed to write seed to disk"); - f.sync_all().expect("Failed to sync seed to disk"); - key - }; - let cur = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); - let keys_manager = Arc::new(KeysManager::new(&node_privkey, cur.as_secs(), cur.subsec_nanos())); - - // Step 7: Read ChannelMonitor state from disk - let mut outpoint_to_channelmonitor = read_channelmonitors_from_disk("./monitors".to_string(), - keys_manager.clone()).unwrap(); - - // Step 9: Read ChannelManager state from disk - let user_config = UserConfig::default(); - let mut channel_manager: ChannelManager; - let mut channel_manager_last_blockhash: Option = None; - if let Ok(mut f) = fs::File::open("./manager") { - let (last_block_hash_option, channel_manager_from_disk) = { - let mut channel_monitor_mut_references = Vec::new(); - for (_, channel_monitor) in outpoint_to_channelmonitor.iter_mut() { - channel_monitor_mut_references.push(&mut channel_monitor.1); - } - let read_args = ChannelManagerReadArgs::new(keys_manager.clone(), fee_estimator.clone(), - chain_monitor.clone(), broadcaster.clone(), - logger.clone(), user_config, - channel_monitor_mut_references); - <(Option, ChannelManager)>::read(&mut f, read_args).unwrap() - }; - channel_manager = channel_manager_from_disk; - channel_manager_last_blockhash = last_block_hash_option; - } else { - let mut bitcoind_rpc_client = 
bitcoind_client.bitcoind_rpc_client.lock().unwrap(); - let current_chain_height: usize = bitcoind_rpc_client - .call_method("getblockchaininfo", &vec![]).unwrap()["blocks"].as_u64().unwrap() as usize; - channel_manager = channelmanager::ChannelManager::new(Network::Regtest, fee_estimator.clone(), - chain_monitor.clone(), broadcaster.clone(), - logger.clone(), keys_manager.clone(), - user_config, current_chain_height); - } - - // Step 10: Sync ChannelMonitors to chain tip if restarting - let mut chain_tip = None; - let mut chain_listener_channel_monitors = Vec::new(); - let mut cache = UnboundedCache::new(); - let rpc_credentials = base64::encode(format!("{}:{}", rpc_user, rpc_password)); - let mut block_source = RpcClient::new(&rpc_credentials, HttpEndpoint::for_host(bitcoind_host) - .with_port(bitcoind_port)).unwrap(); - let runtime = Runtime::new().expect("Unable to create a runtime"); - if outpoint_to_channelmonitor.len() > 0 { - for (outpoint, blockhash_and_monitor) in outpoint_to_channelmonitor.drain() { - let blockhash = blockhash_and_monitor.0; - let channel_monitor = blockhash_and_monitor.1; - chain_listener_channel_monitors.push((blockhash, (RefCell::new(channel_monitor), - broadcaster.clone(), fee_estimator.clone(), - logger.clone()), outpoint)); - } - - let mut chain_listeners = Vec::new(); - for monitor_listener_info in chain_listener_channel_monitors.iter_mut() { - chain_listeners.push((monitor_listener_info.0, - &mut monitor_listener_info.1 as &mut dyn chain::Listen)); - } - // Because `sync_listeners` is an async function and we want to run it synchronously, - // we run it in a tokio Runtime. - chain_tip = Some(runtime.block_on(init::sync_listeners(&mut block_source, Network::Regtest, - &mut cache, chain_listeners)).unwrap()); - } - - // Step 11: Give ChannelMonitors to ChainMonitor - if chain_listener_channel_monitors.len() > 0 { - for item in chain_listener_channel_monitors.drain(..) 
{ - let channel_monitor = item.1.0.into_inner(); - let funding_outpoint = item.2; - chain_monitor.watch_channel(funding_outpoint, channel_monitor).unwrap(); - } - } - - // Step 12: Sync ChannelManager to chain tip if restarting - if let Some(channel_manager_blockhash) = channel_manager_last_blockhash { - let chain_listener = vec![ - (channel_manager_blockhash, &mut channel_manager as &mut dyn chain::Listen)]; - chain_tip = Some(runtime.block_on(init::sync_listeners(&mut block_source, Network::Regtest, - &mut cache, chain_listener)).unwrap()); - } - - // Step 13: Optional: Initialize the NetGraphMsgHandler - // XXX persist routing data - let genesis = genesis_block(Network::Regtest).header.block_hash(); - let router = Arc::new(NetGraphMsgHandler::new(genesis, None::>, logger.clone())); - - // Step 14: Initialize the PeerManager - let channel_manager = Arc::new(channel_manager); - let mut ephemeral_bytes = [0; 32]; - rand::thread_rng().fill_bytes(&mut ephemeral_bytes); - let lightning_msg_handler = MessageHandler { chan_handler: channel_manager.clone(), - route_handler: router.clone() }; - let peer_manager: Arc = Arc::new(PeerManager::new(lightning_msg_handler, - keys_manager.get_node_secret(), - &ephemeral_bytes, logger.clone())); - - // ## Running LDK - // Step 15: Initialize LDK Event Handling - let (event_ntfn_sender, mut event_ntfn_receiver) = mpsc::channel(2); - let peer_manager_event_listener = peer_manager.clone(); - let channel_manager_event_listener = channel_manager.clone(); - let chain_monitor_event_listener = chain_monitor.clone(); - let payment_info: PaymentInfoStorage = Arc::new(Mutex::new(HashMap::new())); - let payment_info_for_events = payment_info.clone(); - thread::spawn(move || async move { - let mut pending_txs = HashMap::new(); - loop { - event_ntfn_receiver.recv().await.unwrap(); - pending_txs = handle_ldk_events(peer_manager_event_listener.clone(), - channel_manager_event_listener.clone(), - chain_monitor_event_listener.clone(), - bitcoind_client.clone(), keys_manager.clone(), - pending_txs, payment_info_for_events.clone()); - } - }); - - // Step 16: Initialize Peer Connection Handling - let peer_manager_connection_handler = peer_manager.clone(); - let event_notifier = event_ntfn_sender.clone(); - thread::spawn(move || async move { - let listener = std::net::TcpListener::bind("0.0.0.0:9735").unwrap(); - loop { - let tcp_stream = listener.accept().unwrap().0; - lightning_net_tokio::setup_inbound(peer_manager_connection_handler.clone(), - event_notifier.clone(), tcp_stream).await; - } - }); - - // Step 17: Connect and Disconnect Blocks - let mut chain_poller = poll::ChainPoller::new(&mut block_source, Network::Regtest); - if chain_tip.is_none() { - match runtime.block_on(chain_poller.poll_chain_tip(None)).unwrap() { - ChainTip::Better(header) => chain_tip = Some(header), - _ => panic!("Unexpected chain tip") - } - } - let chain_listener = (chain_monitor.clone(), channel_manager.clone()); - let _spv_client = SpvClient::new(chain_tip.unwrap(), chain_poller, &mut cache, &chain_listener); - - // Step 17 & 18: Initialize ChannelManager persistence & Once Per Minute: ChannelManager's - // timer_chan_freshness_every_min() and PeerManager's timer_tick_occurred - let persist_channel_manager_callback = move |node: &ChannelManager| { - FilesystemPersister::persist_manager("./".to_string(), &*node) - }; - BackgroundProcessor::start(persist_channel_manager_callback, channel_manager.clone(), logger.clone()); - let peer_manager_processor = peer_manager.clone(); - thread::spawn(move 
|| { - loop { - peer_manager_processor.timer_tick_occured(); - thread::sleep(Duration::new(60, 0)); - } - }); - cli::poll_for_user_input(peer_manager.clone(), channel_manager.clone(), event_ntfn_sender.clone()); +#[tokio::main] +pub async fn main() { + #[cfg(not(target_os = "windows"))] + { + // Catch Ctrl-C with a dummy signal handler. + unsafe { + let mut new_action: libc::sigaction = core::mem::zeroed(); + let mut old_action: libc::sigaction = core::mem::zeroed(); + + extern "C" fn dummy_handler( + _: libc::c_int, _: *const libc::siginfo_t, _: *const libc::c_void, + ) { + } + + new_action.sa_sigaction = dummy_handler as libc::sighandler_t; + new_action.sa_flags = libc::SA_SIGINFO; + + libc::sigaction( + libc::SIGINT, + &new_action as *const libc::sigaction, + &mut old_action as *mut libc::sigaction, + ); + } + } + + start_ldk().await; }