use lightning::chain;
use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
use lightning::chain::chainmonitor;
-use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager};
+use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient};
use lightning::chain::{BestBlock, Filter, Watch};
use lightning::ln::channelmanager;
use lightning::ln::channelmanager::{
use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager};
use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
use lightning::routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
-use lightning::routing::scorer::Scorer;
+use lightning::routing::scoring::ProbabilisticScorer;
use lightning::util::config::UserConfig;
use lightning::util::events::{Event, PaymentPurpose};
use lightning::util::ser::ReadableArgs;
-use lightning_background_processor::BackgroundProcessor;
+use lightning_background_processor::{BackgroundProcessor, Persister};
use lightning_block_sync::init;
use lightning_block_sync::poll;
use lightning_block_sync::SpvClient;
+/// Alias for LDK's `payment::InvoicePayer`, generic over the event handler `E`.
+/// Routing decisions are scored by a `ProbabilisticScorer` shared behind a
+/// `Mutex` so the background scorer-persistence task can also access it.
pub(crate) type InvoicePayer<E> = payment::InvoicePayer<
Arc<ChannelManager>,
Router,
- Arc<Mutex<Scorer>>,
+ Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph>>>>,
Arc<FilesystemLogger>,
E,
>;
+/// Router used by `InvoicePayer`, backed by the shared `NetworkGraph`.
type Router = DefaultRouter<Arc<NetworkGraph>, Arc<FilesystemLogger>>;
+/// Persists node state to files under `data_dir` on behalf of the
+/// `BackgroundProcessor` (see the `Persister` impl below).
+struct DataPersister {
+ // Directory the `FilesystemPersister` writes the manager/graph files into.
+ data_dir: String,
+}
+
+/// Hooks `DataPersister` into `lightning_background_processor::Persister` so
+/// the background processor can write the `ChannelManager` and `NetworkGraph`
+/// to disk via `FilesystemPersister`.
+impl
+ Persister<
+ InMemorySigner,
+ Arc<ChainMonitor>,
+ Arc<BitcoindClient>,
+ Arc<KeysManager>,
+ Arc<BitcoindClient>,
+ Arc<FilesystemLogger>,
+ > for DataPersister
+{
+ // Manager persistence failures are propagated: losing ChannelManager state
+ // is fatal, so the caller must see the error.
+ fn persist_manager(&self, channel_manager: &ChannelManager) -> Result<(), std::io::Error> {
+ FilesystemPersister::persist_manager(self.data_dir.clone(), channel_manager)
+ }
+
+ // Graph persistence failures are logged but swallowed (always returns Ok).
+ fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), std::io::Error> {
+ if FilesystemPersister::persist_network_graph(self.data_dir.clone(), network_graph).is_err()
+ {
+ // Persistence errors here are non-fatal as we can just fetch the routing graph
+ // again later, but they may indicate a disk error which could be fatal elsewhere.
+ eprintln!("Warning: Failed to persist network graph, check your disk and permissions");
+ }
+
+ Ok(())
+ }
+}
+
async fn handle_ldk_events(
channel_manager: Arc<ChannelManager>, bitcoind_client: Arc<BitcoindClient>,
keys_manager: Arc<KeysManager>, inbound_payments: PaymentInfoStorage,
Network::Bitcoin => bitcoin_bech32::constants::Network::Bitcoin,
Network::Testnet => bitcoin_bech32::constants::Network::Testnet,
Network::Regtest => bitcoin_bech32::constants::Network::Regtest,
- Network::Signet => panic!("Signet unsupported"),
+ Network::Signet => bitcoin_bech32::constants::Network::Signet,
},
)
.expect("Lightning funding tx should always be to a SegWit output")
}
}
}
- Event::PaymentSent { payment_preimage, payment_hash, .. } => {
+ Event::PaymentSent { payment_preimage, payment_hash, fee_paid_msat, .. } => {
let mut payments = outbound_payments.lock().unwrap();
for (hash, payment) in payments.iter_mut() {
if *hash == *payment_hash {
payment.preimage = Some(*payment_preimage);
payment.status = HTLCStatus::Succeeded;
println!(
- "\nEVENT: successfully sent payment of {} millisatoshis from \
+ "\nEVENT: successfully sent payment of {} millisatoshis{} from \
payment hash {:?} with preimage {:?}",
payment.amt_msat,
+ if let Some(fee) = fee_paid_msat {
+ format!(" (fee {} msat)", fee)
+ } else {
+ "".to_string()
+ },
hex_utils::hex_str(&payment_hash.0),
hex_utils::hex_str(&payment_preimage.0)
);
}
}
}
- Event::PaymentPathFailed {
- payment_hash,
- rejected_by_dest,
- all_paths_failed,
- short_channel_id,
- ..
- } => {
+ Event::OpenChannelRequest { .. } => {
+ // Unreachable, we don't set manually_accept_inbound_channels
+ }
+ Event::PaymentPathSuccessful { .. } => {}
+ Event::PaymentPathFailed { .. } => {}
+ Event::PaymentFailed { payment_hash, .. } => {
print!(
- "\nEVENT: Failed to send payment{} to payment hash {:?}",
- if *all_paths_failed { "" } else { " along MPP path" },
+ "\nEVENT: Failed to send payment to payment hash {:?}: exhausted payment retry attempts",
hex_utils::hex_str(&payment_hash.0)
);
- if let Some(scid) = short_channel_id {
- print!(" because of failure at channel {}", scid);
- }
- if *rejected_by_dest {
- println!(": re-attempting the payment will not succeed");
- } else {
- println!(": exhausted payment retry attempts");
- }
print!("> ");
io::stdout().flush().unwrap();
let mut user_config = UserConfig::default();
user_config.peer_channel_config_limits.force_announced_channel_preference = false;
let mut restarting_node = true;
- let (channel_manager_blockhash, mut channel_manager) = {
+ let (channel_manager_blockhash, channel_manager) = {
if let Ok(mut f) = fs::File::open(format!("{}/manager", ldk_data_dir.clone())) {
let mut channel_monitor_mut_references = Vec::new();
for (_, channel_monitor) in channelmonitors.iter_mut() {
let mut chain_tip: Option<poll::ValidatedBlockHeader> = None;
if restarting_node {
let mut chain_listeners =
- vec![(channel_manager_blockhash, &mut channel_manager as &mut dyn chain::Listen)];
+ vec![(channel_manager_blockhash, &channel_manager as &dyn chain::Listen)];
for (blockhash, channel_monitor) in channelmonitors.drain(..) {
let outpoint = channel_monitor.get_funding_txo().0;
}
for monitor_listener_info in chain_listener_channel_monitors.iter_mut() {
- chain_listeners.push((
- monitor_listener_info.0,
- &mut monitor_listener_info.1 as &mut dyn chain::Listen,
- ));
+ chain_listeners
+ .push((monitor_listener_info.0, &monitor_listener_info.1 as &dyn chain::Listen));
}
chain_tip = Some(
init::synchronize_listeners(
None::<Arc<dyn chain::Access + Send + Sync>>,
logger.clone(),
));
- let network_graph_persist = Arc::clone(&network_graph);
- tokio::spawn(async move {
- let mut interval = tokio::time::interval(Duration::from_secs(600));
- loop {
- interval.tick().await;
- if disk::persist_network(Path::new(&network_graph_path), &network_graph_persist)
- .is_err()
- {
- // Persistence errors here are non-fatal as we can just fetch the routing graph
- // again later, but they may indicate a disk error which could be fatal elsewhere.
- eprintln!(
- "Warning: Failed to persist network graph, check your disk and permissions"
- );
- }
- }
- });
// Step 12: Initialize the PeerManager
let channel_manager: Arc<ChannelManager> = Arc::new(channel_manager);
};
let peer_manager: Arc<PeerManager> = Arc::new(PeerManager::new(
lightning_msg_handler,
- keys_manager.get_node_secret(),
+ keys_manager.get_node_secret(Recipient::Node).unwrap(),
&ephemeral_bytes,
logger.clone(),
Arc::new(IgnoringMessageHandler {}),
));
};
- // Step 16: Create InvoicePayer
- let router = DefaultRouter::new(network_graph.clone(), logger.clone());
- let scorer = Arc::new(Mutex::new(Scorer::default()));
+ // Step 16: Initialize routing ProbabilisticScorer
+ let scorer_path = format!("{}/prob_scorer", ldk_data_dir.clone());
+ let scorer = Arc::new(Mutex::new(disk::read_scorer(
+ Path::new(&scorer_path),
+ Arc::clone(&network_graph),
+ )));
+ let scorer_persist = Arc::clone(&scorer);
+ tokio::spawn(async move {
+ let mut interval = tokio::time::interval(Duration::from_secs(600));
+ loop {
+ interval.tick().await;
+ if disk::persist_scorer(Path::new(&scorer_path), &scorer_persist.lock().unwrap())
+ .is_err()
+ {
+ // Persistence errors here are non-fatal as channels will be re-scored as payments
+ // fail, but they may indicate a disk error which could be fatal elsewhere.
+ eprintln!("Warning: Failed to persist scorer, check your disk and permissions");
+ }
+ }
+ });
+
+ // Step 17: Create InvoicePayer
+ let router = DefaultRouter::new(
+ network_graph.clone(),
+ logger.clone(),
+ keys_manager.get_secure_random_bytes(),
+ );
let invoice_payer = Arc::new(InvoicePayer::new(
channel_manager.clone(),
router,
payment::RetryAttempts(5),
));
- // Step 17: Persist ChannelManager
- let data_dir = ldk_data_dir.clone();
- let persist_channel_manager_callback =
- move |node: &ChannelManager| FilesystemPersister::persist_manager(data_dir.clone(), &*node);
+ // Step 18: Persist ChannelManager and NetworkGraph
+ let persister = DataPersister { data_dir: ldk_data_dir.clone() };
- // Step 18: Background Processing
+ // Step 19: Background Processing
let background_processor = BackgroundProcessor::start(
- persist_channel_manager_callback,
+ persister,
invoice_payer.clone(),
chain_monitor.clone(),
channel_manager.clone(),
logger.clone(),
);
- // Reconnect to channel peers if possible.
+ // Regularly reconnect to channel peers.
+ let connect_cm = Arc::clone(&channel_manager);
+ let connect_pm = Arc::clone(&peer_manager);
let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir.clone());
- match disk::read_channel_peer_data(Path::new(&peer_data_path)) {
- Ok(mut info) => {
- for (pubkey, peer_addr) in info.drain() {
- for chan_info in channel_manager.list_channels() {
- if pubkey == chan_info.counterparty.node_id {
- let _ =
- cli::connect_peer_if_necessary(pubkey, peer_addr, peer_manager.clone())
+ tokio::spawn(async move {
+ let mut interval = tokio::time::interval(Duration::from_secs(1));
+ loop {
+ interval.tick().await;
+ match disk::read_channel_peer_data(Path::new(&peer_data_path)) {
+ Ok(info) => {
+ let peers = connect_pm.get_peer_node_ids();
+ for node_id in connect_cm
+ .list_channels()
+ .iter()
+ .map(|chan| chan.counterparty.node_id)
+ .filter(|id| !peers.contains(id))
+ {
+ for (pubkey, peer_addr) in info.iter() {
+ if *pubkey == node_id {
+ let _ = cli::do_connect_peer(
+ *pubkey,
+ peer_addr.clone(),
+ Arc::clone(&connect_pm),
+ )
.await;
+ }
+ }
}
}
+ Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e),
}
}
- Err(e) => println!("ERROR: errored reading channel peer info from disk: {:?}", e),
- }
+ });
// Regularly broadcast our node_announcement. This is only required (or possible) if we have
// some public channels, and is only useful if we have public listen address(es) to announce.
peer_manager.clone(),
channel_manager.clone(),
keys_manager.clone(),
- network_graph.clone(),
- scorer.clone(),
inbound_payments,
outbound_payments,
ldk_data_dir.clone(),
- logger.clone(),
network,
)
.await;