lightning::routing::router::benches::generate_large_mpp_routes_with_probabilistic_scorer,
lightning::sign::benches::bench_get_secure_random_bytes,
lightning::ln::channelmanager::bench::bench_sends,
- lightning_persister::bench::bench_sends,
+ lightning_persister::fs_store::bench::bench_sends,
lightning_rapid_gossip_sync::bench::bench_reading_full_graph_from_file,
lightning::routing::gossip::benches::read_network_graph,
lightning::routing::gossip::benches::write_network_graph);
// You may not use this file except in accordance with one or both of these
// licenses.
-use lightning::ln::msgs::NetAddress;
+use lightning::ln::msgs::SocketAddress;
use core::str::FromStr;
use crate::utils::test_logger;
#[inline]
/// Fuzz entry point: interpret the raw fuzz input as UTF-8 and feed it to the
/// address parser. Non-UTF-8 input is silently skipped; the parse result is
/// discarded — the fuzzer only cares about panics/crashes in `from_str`.
pub fn do_test(data: &[u8]) {
if let Ok(s) = std::str::from_utf8(data) {
// Type was renamed upstream: `NetAddress` -> `SocketAddress`.
- let _ = NetAddress::from_str(s);
+ let _ = SocketAddress::from_str(s);
}
}
/// For example, in order to process background events in a [Tokio](https://tokio.rs/) task, you
/// could setup `process_events_async` like this:
/// ```
-/// # struct MyPersister {}
-/// # impl lightning::util::persist::KVStorePersister for MyPersister {
-/// # fn persist<W: lightning::util::ser::Writeable>(&self, key: &str, object: &W) -> lightning::io::Result<()> { Ok(()) }
+/// # use lightning::io;
+/// # use std::sync::{Arc, Mutex};
+/// # use std::sync::atomic::{AtomicBool, Ordering};
+/// # use lightning_background_processor::{process_events_async, GossipSync};
+/// # struct MyStore {}
+/// # impl lightning::util::persist::KVStore for MyStore {
+/// # fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> { Ok(Vec::new()) }
+/// # fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) }
+/// # fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) }
+/// # fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> { Ok(Vec::new()) }
/// # }
/// # struct MyEventHandler {}
/// # impl MyEventHandler {
/// # fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 }
/// # fn disconnect_socket(&mut self) {}
/// # }
-/// # use std::sync::{Arc, Mutex};
-/// # use std::sync::atomic::{AtomicBool, Ordering};
-/// # use lightning_background_processor::{process_events_async, GossipSync};
/// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface + Send + Sync;
/// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator + Send + Sync;
/// # type MyNodeSigner = dyn lightning::sign::NodeSigner + Send + Sync;
/// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup + Send + Sync;
/// # type MyFilter = dyn lightning::chain::Filter + Send + Sync;
/// # type MyLogger = dyn lightning::util::logger::Logger + Send + Sync;
-/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyPersister>>;
+/// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor<lightning::sign::InMemorySigner, Arc<MyFilter>, Arc<MyBroadcaster>, Arc<MyFeeEstimator>, Arc<MyLogger>, Arc<MyStore>>;
/// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager<MySocketDescriptor, MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyUtxoLookup, MyLogger>;
/// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<MyLogger>>;
/// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync<Arc<MyNetworkGraph>, Arc<MyUtxoLookup>, Arc<MyLogger>>;
/// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
/// # type MyScorer = Mutex<lightning::routing::scoring::ProbabilisticScorer<Arc<MyNetworkGraph>, Arc<MyLogger>>>;
///
-/// # async fn setup_background_processing(my_persister: Arc<MyPersister>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
+/// # async fn setup_background_processing(my_persister: Arc<MyStore>, my_event_handler: Arc<MyEventHandler>, my_chain_monitor: Arc<MyChainMonitor>, my_channel_manager: Arc<MyChannelManager>, my_gossip_sync: Arc<MyGossipSync>, my_logger: Arc<MyLogger>, my_scorer: Arc<MyScorer>, my_peer_manager: Arc<MyPeerManager>) {
/// let background_persister = Arc::clone(&my_persister);
/// let background_event_handler = Arc::clone(&my_event_handler);
/// let background_chain_mon = Arc::clone(&my_chain_monitor);
use lightning::util::config::UserConfig;
use lightning::util::ser::Writeable;
use lightning::util::test_utils;
- use lightning::util::persist::KVStorePersister;
- use lightning_persister::FilesystemPersister;
+ use lightning::util::persist::{KVStore, CHANNEL_MANAGER_PERSISTENCE_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE, CHANNEL_MANAGER_PERSISTENCE_KEY, NETWORK_GRAPH_PERSISTENCE_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, SCORER_PERSISTENCE_NAMESPACE, SCORER_PERSISTENCE_SUB_NAMESPACE, SCORER_PERSISTENCE_KEY};
+ use lightning_persister::fs_store::FilesystemStore;
use std::collections::VecDeque;
use std::{fs, env};
use std::path::PathBuf;
>,
Arc<test_utils::TestLogger>>;
- type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemPersister>>;
+ type ChainMonitor = chainmonitor::ChainMonitor<InMemorySigner, Arc<test_utils::TestChainSource>, Arc<test_utils::TestBroadcaster>, Arc<test_utils::TestFeeEstimator>, Arc<test_utils::TestLogger>, Arc<FilesystemStore>>;
type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
rapid_gossip_sync: RGS,
peer_manager: Arc<PeerManager<TestDescriptor, Arc<test_utils::TestChannelMessageHandler>, Arc<test_utils::TestRoutingMessageHandler>, IgnoringMessageHandler, Arc<test_utils::TestLogger>, IgnoringMessageHandler, Arc<KeysManager>>>,
chain_monitor: Arc<ChainMonitor>,
- persister: Arc<FilesystemPersister>,
+ kv_store: Arc<FilesystemStore>,
tx_broadcaster: Arc<test_utils::TestBroadcaster>,
network_graph: Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
logger: Arc<test_utils::TestLogger>,
impl Drop for Node {
fn drop(&mut self) {
- let data_dir = self.persister.get_data_dir();
+ let data_dir = self.kv_store.get_data_dir();
match fs::remove_dir_all(data_dir.clone()) {
- Err(e) => println!("Failed to remove test persister directory {}: {}", data_dir, e),
+ Err(e) => println!("Failed to remove test store directory {}: {}", data_dir.display(), e),
_ => {}
}
}
graph_persistence_notifier: Option<SyncSender<()>>,
manager_error: Option<(std::io::ErrorKind, &'static str)>,
scorer_error: Option<(std::io::ErrorKind, &'static str)>,
- filesystem_persister: FilesystemPersister,
+ kv_store: FilesystemStore,
}
impl Persister {
- fn new(data_dir: String) -> Self {
- let filesystem_persister = FilesystemPersister::new(data_dir);
- Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, filesystem_persister }
+ fn new(data_dir: PathBuf) -> Self {
+ let kv_store = FilesystemStore::new(data_dir);
+ Self { graph_error: None, graph_persistence_notifier: None, manager_error: None, scorer_error: None, kv_store }
}
fn with_graph_error(self, error: std::io::ErrorKind, message: &'static str) -> Self {
}
}
- impl KVStorePersister for Persister {
- fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
- if key == "manager" {
+ impl KVStore for Persister {
+ fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> lightning::io::Result<Vec<u8>> {
+ self.kv_store.read(namespace, sub_namespace, key)
+ }
+
+ fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> lightning::io::Result<()> {
+ if namespace == CHANNEL_MANAGER_PERSISTENCE_NAMESPACE &&
+ sub_namespace == CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE &&
+ key == CHANNEL_MANAGER_PERSISTENCE_KEY
+ {
if let Some((error, message)) = self.manager_error {
return Err(std::io::Error::new(error, message))
}
}
- if key == "network_graph" {
+ if namespace == NETWORK_GRAPH_PERSISTENCE_NAMESPACE &&
+ sub_namespace == NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE &&
+ key == NETWORK_GRAPH_PERSISTENCE_KEY
+ {
if let Some(sender) = &self.graph_persistence_notifier {
match sender.send(()) {
Ok(()) => {},
}
}
- if key == "scorer" {
+ if namespace == SCORER_PERSISTENCE_NAMESPACE &&
+ sub_namespace == SCORER_PERSISTENCE_SUB_NAMESPACE &&
+ key == SCORER_PERSISTENCE_KEY
+ {
if let Some((error, message)) = self.scorer_error {
return Err(std::io::Error::new(error, message))
}
}
- self.filesystem_persister.persist(key, object)
+ self.kv_store.write(namespace, sub_namespace, key, buf)
+ }
+
+ fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> lightning::io::Result<()> {
+ self.kv_store.remove(namespace, sub_namespace, key, lazy)
+ }
+
+ fn list(&self, namespace: &str, sub_namespace: &str) -> lightning::io::Result<Vec<String>> {
+ self.kv_store.list(namespace, sub_namespace)
}
}
let seed = [i as u8; 32];
let router = Arc::new(DefaultRouter::new(network_graph.clone(), logger.clone(), seed, scorer.clone(), ()));
let chain_source = Arc::new(test_utils::TestChainSource::new(Network::Bitcoin));
- let persister = Arc::new(FilesystemPersister::new(format!("{}_persister_{}", &persist_dir, i)));
+ let kv_store = Arc::new(FilesystemStore::new(format!("{}_persister_{}", &persist_dir, i).into()));
let now = Duration::from_secs(genesis_block.header.time as u64);
let keys_manager = Arc::new(KeysManager::new(&seed, now.as_secs(), now.subsec_nanos()));
- let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), persister.clone()));
+ let chain_monitor = Arc::new(chainmonitor::ChainMonitor::new(Some(chain_source.clone()), tx_broadcaster.clone(), logger.clone(), fee_estimator.clone(), kv_store.clone()));
let best_block = BestBlock::from_network(network);
let params = ChainParameters { network, best_block };
let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
onion_message_handler: IgnoringMessageHandler{}, custom_message_handler: IgnoringMessageHandler{}
};
let peer_manager = Arc::new(PeerManager::new(msg_handler, 0, &seed, logger.clone(), keys_manager.clone()));
- let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, persister, tx_broadcaster, network_graph, logger, best_block, scorer };
+ let node = Node { node: manager, p2p_gossip_sync, rapid_gossip_sync, peer_manager, chain_monitor, kv_store, tx_broadcaster, network_graph, logger, best_block, scorer };
nodes.push(node);
}
let tx = open_channel!(nodes[0], nodes[1], 100000);
// Initiate the background processors to watch each node.
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
// `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and
// `PeerManager::timer_tick_occurred` every `PING_TIMER`.
let (_, nodes) = create_nodes(1, "test_timer_tick_called");
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
let (_, nodes) = create_nodes(2, "test_persist_error");
open_channel!(nodes[0], nodes[1], 100000);
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
let (_, nodes) = create_nodes(2, "test_persist_error_sync");
open_channel!(nodes[0], nodes[1], 100000);
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_manager_error(std::io::ErrorKind::Other, "test"));
let bp_future = super::process_events_async(
fn test_network_graph_persist_error() {
// Test that if we encounter an error during network graph persistence, an error gets returned.
let (_, nodes) = create_nodes(2, "test_persist_network_graph_error");
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_graph_error(std::io::ErrorKind::Other, "test"));
let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].p2p_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
fn test_scorer_persist_error() {
// Test that if we encounter an error during scorer persistence, an error gets returned.
let (_, nodes) = create_nodes(2, "test_persist_scorer_error");
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_scorer_error(std::io::ErrorKind::Other, "test"));
let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
fn test_background_event_handling() {
let (_, mut nodes) = create_nodes(2, "test_background_event_handling");
let channel_value = 100000;
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir.clone()));
// Set up a background event handler for FundingGenerationReady events.
#[test]
fn test_scorer_persistence() {
let (_, nodes) = create_nodes(2, "test_scorer_persistence");
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let event_handler = |_: _| {};
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion");
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
let event_handler = |_: _| {};
let (sender, receiver) = std::sync::mpsc::sync_channel(1);
let (_, nodes) = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async");
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
};
let (_, nodes) = create_nodes(1, "test_payment_path_scoring");
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
};
let (_, nodes) = create_nodes(1, "test_payment_path_scoring_async");
- let data_dir = nodes[0].persister.get_data_dir();
+ let data_dir = nodes[0].kv_store.get_data_dir();
let persister = Arc::new(Persister::new(data_dir));
let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
use lightning::ln::peer_handler;
use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait;
use lightning::ln::peer_handler::APeerManager;
-use lightning::ln::msgs::NetAddress;
+use lightning::ln::msgs::SocketAddress;
use std::ops::Deref;
use std::task::{self, Poll};
}
}
-fn get_addr_from_stream(stream: &StdTcpStream) -> Option<NetAddress> {
+fn get_addr_from_stream(stream: &StdTcpStream) -> Option<SocketAddress> {
match stream.peer_addr() {
- Ok(SocketAddr::V4(sockaddr)) => Some(NetAddress::IPv4 {
+ Ok(SocketAddr::V4(sockaddr)) => Some(SocketAddress::TcpIpV4 {
addr: sockaddr.ip().octets(),
port: sockaddr.port(),
}),
- Ok(SocketAddr::V6(sockaddr)) => Some(NetAddress::IPv6 {
+ Ok(SocketAddr::V6(sockaddr)) => Some(SocketAddress::TcpIpV6 {
addr: sockaddr.ip().octets(),
port: sockaddr.port(),
}),
version = "0.0.116"
authors = ["Valentine Wallace", "Matt Corallo"]
license = "MIT OR Apache-2.0"
-repository = "https://github.com/lightningdevkit/rust-lightning/"
+repository = "https://github.com/lightningdevkit/rust-lightning"
description = """
-Utilities to manage Rust-Lightning channel data persistence and retrieval.
+Utilities for LDK data persistence and retrieval.
"""
edition = "2018"
[dependencies]
bitcoin = "0.29.0"
lightning = { version = "0.0.116", path = "../lightning" }
-libc = "0.2"
[target.'cfg(windows)'.dependencies]
-winapi = { version = "0.3", features = ["winbase"] }
+windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] }
[target.'cfg(ldk_bench)'.dependencies]
criterion = { version = "0.4", optional = true, default-features = false }
[dev-dependencies]
lightning = { version = "0.0.116", path = "../lightning", features = ["_test_utils"] }
+bitcoin = { version = "0.29.0", default-features = false }
--- /dev/null
+//! Objects related to [`FilesystemStore`] live here.
+use crate::utils::{check_namespace_key_validity, is_valid_kvstore_str};
+
+use lightning::util::persist::KVStore;
+use lightning::util::string::PrintableString;
+
+use std::collections::HashMap;
+use std::fs;
+use std::io::{Read, Write};
+use std::path::{Path, PathBuf};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::{Arc, Mutex, RwLock};
+
+#[cfg(target_os = "windows")]
+use {std::ffi::OsStr, std::os::windows::ffi::OsStrExt};
+
+#[cfg(target_os = "windows")]
+// Wraps a Win32 call that returns nonzero on success: maps success to `Ok(())`
+// and failure to the thread's last OS error via `io::Error::last_os_error()`.
+macro_rules! call {
+ ($e: expr) => {
+ if $e != 0 {
+ Ok(())
+ } else {
+ Err(std::io::Error::last_os_error())
+ }
+ };
+}
+
+#[cfg(target_os = "windows")]
+// Encodes a path as a NUL-terminated UTF-16 buffer, as required by the Win32
+// wide-string (`...W`) file APIs used below.
+fn path_to_windows_str<T: AsRef<OsStr>>(path: &T) -> Vec<u16> {
+ path.as_ref().encode_wide().chain(Some(0)).collect()
+}
+
+// The number of read/write/remove/list operations after which we clean up our `locks` HashMap.
+const GC_LOCK_INTERVAL: usize = 25;
+
+/// A [`KVStore`] implementation that writes to and reads from the file system.
+pub struct FilesystemStore {
+ // Base directory under which all namespace/sub-namespace directories live.
+ data_dir: PathBuf,
+ // Monotonic counter producing unique `.tmp` (and, on Windows, `.trash`) filenames.
+ tmp_file_counter: AtomicUsize,
+ // Operation counter; every `GC_LOCK_INTERVAL` operations unused `locks` entries are pruned.
+ gc_counter: AtomicUsize,
+ // One `RwLock` per destination file path, serializing concurrent access to the same key.
+ locks: Mutex<HashMap<PathBuf, Arc<RwLock<()>>>>,
+}
+
+impl FilesystemStore {
+ /// Constructs a new [`FilesystemStore`].
+ pub fn new(data_dir: PathBuf) -> Self {
+ let locks = Mutex::new(HashMap::new());
+ let tmp_file_counter = AtomicUsize::new(0);
+ let gc_counter = AtomicUsize::new(1);
+ Self { data_dir, tmp_file_counter, gc_counter, locks }
+ }
+
+ /// Returns the data directory.
+ pub fn get_data_dir(&self) -> PathBuf {
+ self.data_dir.clone()
+ }
+
+ // Periodically drops `locks` entries whose `Arc` is no longer held by any
+ // in-flight operation, so the map doesn't grow without bound over time.
+ fn garbage_collect_locks(&self) {
+ let gc_counter = self.gc_counter.fetch_add(1, Ordering::AcqRel);
+
+ // Only every `GC_LOCK_INTERVAL`-th call actually takes the map lock.
+ if gc_counter % GC_LOCK_INTERVAL == 0 {
+ // Take outer lock for the cleanup.
+ let mut outer_lock = self.locks.lock().unwrap();
+
+ // Garbage collect all lock entries that are not referenced anymore.
+ // strong_count > 1 means some operation besides the map still holds the Arc.
+ outer_lock.retain(|_, v| Arc::strong_count(&v) > 1);
+ }
+ }
+
+ // Builds `<data_dir>/<namespace>[/<sub_namespace>]`. An empty sub-namespace
+ // adds no extra path component.
+ fn get_dest_dir_path(&self, namespace: &str, sub_namespace: &str) -> std::io::Result<PathBuf> {
+ let mut dest_dir_path = {
+ #[cfg(target_os = "windows")]
+ {
+ // On Windows we canonicalize (which requires the dir to exist) so the
+ // wide-string Win32 APIs below get an absolute path.
+ let data_dir = self.data_dir.clone();
+ fs::create_dir_all(data_dir.clone())?;
+ fs::canonicalize(data_dir)?
+ }
+ #[cfg(not(target_os = "windows"))]
+ {
+ self.data_dir.clone()
+ }
+ };
+
+ dest_dir_path.push(namespace);
+ if !sub_namespace.is_empty() {
+ dest_dir_path.push(sub_namespace);
+ }
+
+ Ok(dest_dir_path)
+ }
+}
+
+impl KVStore for FilesystemStore {
+ // Reads the full contents of `<namespace>[/<sub_namespace>]/<key>` while
+ // holding the per-file lock in read mode, excluding concurrent writers.
+ fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> std::io::Result<Vec<u8>> {
+ check_namespace_key_validity(namespace, sub_namespace, Some(key), "read")?;
+
+ let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+ dest_file_path.push(key);
+
+ let mut buf = Vec::new();
+ {
+ // Fetch (or lazily create) this file's lock; the outer map lock is
+ // released before we block on the inner read lock.
+ let inner_lock_ref = {
+ let mut outer_lock = self.locks.lock().unwrap();
+ Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+ };
+ let _guard = inner_lock_ref.read().unwrap();
+
+ let mut f = fs::File::open(dest_file_path)?;
+ f.read_to_end(&mut buf)?;
+ }
+
+ self.garbage_collect_locks();
+
+ Ok(buf)
+ }
+
+ // Atomically replaces the value at `<namespace>[/<sub_namespace>]/<key>`:
+ // write to a unique temp file, fsync it, then rename/replace over the
+ // destination under the per-file write lock.
+ fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> std::io::Result<()> {
+ check_namespace_key_validity(namespace, sub_namespace, Some(key), "write")?;
+
+ let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+ dest_file_path.push(key);
+
+ let parent_directory = dest_file_path
+ .parent()
+ .ok_or_else(|| {
+ let msg =
+ format!("Could not retrieve parent directory of {}.", dest_file_path.display());
+ std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)
+ })?;
+ fs::create_dir_all(&parent_directory)?;
+
+ // Do a crazy dance with lots of fsync()s to be overly cautious here...
+ // We never want to end up in a state where we've lost the old data, or end up using the
+ // old data on power loss after we've returned.
+ // The way to atomically write a file on Unix platforms is:
+ // open(tmpname), write(tmpfile), fsync(tmpfile), close(tmpfile), rename(), fsync(dir)
+ let mut tmp_file_path = dest_file_path.clone();
+ let tmp_file_ext = format!("{}.tmp", self.tmp_file_counter.fetch_add(1, Ordering::AcqRel));
+ tmp_file_path.set_extension(tmp_file_ext);
+
+ {
+ // Temp file is written and synced outside the per-file lock: its name is
+ // unique, so no other operation can race on it.
+ let mut tmp_file = fs::File::create(&tmp_file_path)?;
+ tmp_file.write_all(&buf)?;
+ tmp_file.sync_all()?;
+ }
+
+ let res = {
+ // Only the final rename/replace happens under the write lock.
+ let inner_lock_ref = {
+ let mut outer_lock = self.locks.lock().unwrap();
+ Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+ };
+ let _guard = inner_lock_ref.write().unwrap();
+
+ #[cfg(not(target_os = "windows"))]
+ {
+ // rename() is atomic on POSIX; the directory fsync persists the
+ // rename itself across power loss.
+ fs::rename(&tmp_file_path, &dest_file_path)?;
+ let dir_file = fs::OpenOptions::new().read(true).open(&parent_directory)?;
+ dir_file.sync_all()?;
+ Ok(())
+ }
+
+ #[cfg(target_os = "windows")]
+ {
+ // ReplaceFileW requires the destination to exist; fall back to
+ // MoveFileExW for the first write of a key.
+ let res = if dest_file_path.exists() {
+ call!(unsafe {
+ windows_sys::Win32::Storage::FileSystem::ReplaceFileW(
+ path_to_windows_str(&dest_file_path).as_ptr(),
+ path_to_windows_str(&tmp_file_path).as_ptr(),
+ std::ptr::null(),
+ windows_sys::Win32::Storage::FileSystem::REPLACEFILE_IGNORE_MERGE_ERRORS,
+ std::ptr::null_mut() as *const core::ffi::c_void,
+ std::ptr::null_mut() as *const core::ffi::c_void,
+ )
+ })
+ } else {
+ call!(unsafe {
+ windows_sys::Win32::Storage::FileSystem::MoveFileExW(
+ path_to_windows_str(&tmp_file_path).as_ptr(),
+ path_to_windows_str(&dest_file_path).as_ptr(),
+ windows_sys::Win32::Storage::FileSystem::MOVEFILE_WRITE_THROUGH
+ | windows_sys::Win32::Storage::FileSystem::MOVEFILE_REPLACE_EXISTING,
+ )
+ })
+ };
+
+ match res {
+ Ok(()) => {
+ // We fsync the dest file in hopes this will also flush the metadata to disk.
+ let dest_file = fs::OpenOptions::new().read(true).write(true)
+ .open(&dest_file_path)?;
+ dest_file.sync_all()?;
+ Ok(())
+ }
+ Err(e) => Err(e),
+ }
+ }
+ };
+
+ self.garbage_collect_locks();
+
+ res
+ }
+
+ // Removes `<namespace>[/<sub_namespace>]/<key>`. With `lazy` set, only a
+ // plain unlink is done; otherwise we additionally try to persist the removal
+ // (directory fsync on Unix, trash-file dance on Windows).
+ fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> std::io::Result<()> {
+ check_namespace_key_validity(namespace, sub_namespace, Some(key), "remove")?;
+
+ let mut dest_file_path = self.get_dest_dir_path(namespace, sub_namespace)?;
+ dest_file_path.push(key);
+
+ // Removing a non-existent key is a no-op, not an error.
+ if !dest_file_path.is_file() {
+ return Ok(());
+ }
+
+ {
+ let inner_lock_ref = {
+ let mut outer_lock = self.locks.lock().unwrap();
+ Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default())
+ };
+ let _guard = inner_lock_ref.write().unwrap();
+
+ if lazy {
+ // If we're lazy we just call remove and be done with it.
+ fs::remove_file(&dest_file_path)?;
+ } else {
+ // If we're not lazy we try our best to persist the updated metadata to ensure
+ // atomicity of this call.
+ #[cfg(not(target_os = "windows"))]
+ {
+ fs::remove_file(&dest_file_path)?;
+
+ let parent_directory = dest_file_path.parent().ok_or_else(|| {
+ let msg =
+ format!("Could not retrieve parent directory of {}.", dest_file_path.display());
+ std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)
+ })?;
+ let dir_file = fs::OpenOptions::new().read(true).open(parent_directory)?;
+ // The above call to `fs::remove_file` corresponds to POSIX `unlink`, whose changes
+ // to the inode might get cached (and hence possibly lost on crash), depending on
+ // the target platform and file system.
+ //
+ // In order to assert we permanently removed the file in question we therefore
+ // call `fsync` on the parent directory on platforms that support it.
+ dir_file.sync_all()?;
+ }
+
+ #[cfg(target_os = "windows")]
+ {
+ // Since Windows `DeleteFile` API is not persisted until the last open file handle
+ // is dropped, and there seemingly is no reliable way to flush the directory
+ // metadata, we here fall back to use a 'recycling bin' model, i.e., first move the
+ // file to be deleted to a temporary trash file and remove the latter file
+ // afterwards.
+ //
+ // This should be marginally better, as, according to the documentation,
+ // `MoveFileExW` APIs should offer stronger persistence guarantees,
+ // at least if `MOVEFILE_WRITE_THROUGH`/`MOVEFILE_REPLACE_EXISTING` is set.
+ // However, all this is partially based on assumptions and local experiments, as
+ // Windows API is horribly underdocumented.
+ let mut trash_file_path = dest_file_path.clone();
+ let trash_file_ext = format!("{}.trash",
+ self.tmp_file_counter.fetch_add(1, Ordering::AcqRel));
+ trash_file_path.set_extension(trash_file_ext);
+
+ call!(unsafe {
+ windows_sys::Win32::Storage::FileSystem::MoveFileExW(
+ path_to_windows_str(&dest_file_path).as_ptr(),
+ path_to_windows_str(&trash_file_path).as_ptr(),
+ windows_sys::Win32::Storage::FileSystem::MOVEFILE_WRITE_THROUGH
+ | windows_sys::Win32::Storage::FileSystem::MOVEFILE_REPLACE_EXISTING,
+ )
+ })?;
+
+ {
+ // We fsync the trash file in hopes this will also flush the original's file
+ // metadata to disk.
+ let trash_file = fs::OpenOptions::new().read(true).write(true)
+ .open(&trash_file_path.clone())?;
+ trash_file.sync_all()?;
+ }
+
+ // We're fine if this remove would fail as the trash file will be cleaned up in
+ // list eventually.
+ fs::remove_file(trash_file_path).ok();
+ }
+ }
+ }
+
+ self.garbage_collect_locks();
+
+ Ok(())
+ }
+
+ // Lists all keys directly under `<namespace>[/<sub_namespace>]`, skipping
+ // `.tmp` leftovers (and, on Windows, reaping stray `.trash` files) and
+ // skipping subdirectories.
+ fn list(&self, namespace: &str, sub_namespace: &str) -> std::io::Result<Vec<String>> {
+ check_namespace_key_validity(namespace, sub_namespace, None, "list")?;
+
+ let prefixed_dest = self.get_dest_dir_path(namespace, sub_namespace)?;
+ let mut keys = Vec::new();
+
+ // A namespace that was never written to simply has no keys.
+ if !Path::new(&prefixed_dest).exists() {
+ return Ok(Vec::new());
+ }
+
+ for entry in fs::read_dir(&prefixed_dest)? {
+ let entry = entry?;
+ let p = entry.path();
+
+ if let Some(ext) = p.extension() {
+ #[cfg(target_os = "windows")]
+ {
+ // Clean up any trash files lying around.
+ if ext == "trash" {
+ fs::remove_file(p).ok();
+ continue;
+ }
+ }
+ if ext == "tmp" {
+ continue;
+ }
+ }
+
+ let metadata = p.metadata()?;
+
+ // We allow the presence of directories in the empty namespace and just skip them.
+ if metadata.is_dir() {
+ continue;
+ }
+
+ // If we otherwise don't find a file at the given path something went wrong.
+ if !metadata.is_file() {
+ debug_assert!(false, "Failed to list keys of {}/{}: file couldn't be accessed.",
+ PrintableString(namespace), PrintableString(sub_namespace));
+ let msg = format!("Failed to list keys of {}/{}: file couldn't be accessed.",
+ PrintableString(namespace), PrintableString(sub_namespace));
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+ }
+
+ // Report the key relative to the listed directory; entries whose names
+ // aren't valid KVStore keys are silently skipped.
+ match p.strip_prefix(&prefixed_dest) {
+ Ok(stripped_path) => {
+ if let Some(relative_path) = stripped_path.to_str() {
+ if is_valid_kvstore_str(relative_path) {
+ keys.push(relative_path.to_string())
+ }
+ } else {
+ debug_assert!(false, "Failed to list keys of {}/{}: file path is not valid UTF-8",
+ PrintableString(namespace), PrintableString(sub_namespace));
+ let msg = format!("Failed to list keys of {}/{}: file path is not valid UTF-8",
+ PrintableString(namespace), PrintableString(sub_namespace));
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+ }
+ }
+ Err(e) => {
+ debug_assert!(false, "Failed to list keys of {}/{}: {}",
+ PrintableString(namespace), PrintableString(sub_namespace), e);
+ let msg = format!("Failed to list keys of {}/{}: {}",
+ PrintableString(namespace), PrintableString(sub_namespace), e);
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+ }
+ }
+ }
+
+ self.garbage_collect_locks();
+
+ Ok(keys)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::test_utils::{do_read_write_remove_list_persist, do_test_store};
+
+ use bitcoin::hashes::hex::FromHex;
+ use bitcoin::Txid;
+
+ use lightning::chain::ChannelMonitorUpdateStatus;
+ use lightning::chain::chainmonitor::Persist;
+ use lightning::chain::transaction::OutPoint;
+ use lightning::check_closed_event;
+ use lightning::events::{ClosureReason, MessageSendEventsProvider};
+ use lightning::ln::functional_test_utils::*;
+ use lightning::util::test_utils;
+ use lightning::util::persist::read_channel_monitors;
+ use std::fs;
+ #[cfg(target_os = "windows")]
+ use {
+ lightning::get_event_msg,
+ lightning::ln::msgs::ChannelMessageHandler,
+ };
+
+ // Test-only: delete the store's on-disk directory when a test's store goes
+ // out of scope, so test runs don't leave data behind.
+ impl Drop for FilesystemStore {
+ fn drop(&mut self) {
+ // We test for invalid directory names, so it's OK if directory removal
+ // fails.
+ match fs::remove_dir_all(&self.data_dir) {
+ Err(e) => println!("Failed to remove test persister directory: {}", e),
+ _ => {}
+ }
+ }
+ }
+
+ #[test]
+ // Exercises the shared KVStore round-trip (read/write/remove/list) against a
+ // FilesystemStore rooted in the OS temp directory.
+ fn read_write_remove_list_persist() {
+ let mut temp_path = std::env::temp_dir();
+ temp_path.push("test_read_write_remove_list_persist");
+ let fs_store = FilesystemStore::new(temp_path);
+ do_read_write_remove_list_persist(&fs_store);
+ }
+
+ #[test]
+ fn test_if_monitors_is_not_dir() {
+ let store = FilesystemStore::new("test_monitors_is_not_dir".into());
+
+ fs::create_dir_all(&store.get_data_dir()).unwrap();
+ let mut path = std::path::PathBuf::from(&store.get_data_dir());
+ path.push("monitors");
+ fs::File::create(path).unwrap();
+
+ let chanmon_cfgs = create_chanmon_cfgs(1);
+ let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
+ let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &store, node_cfgs[0].keys_manager);
+ node_cfgs[0].chain_monitor = chain_mon_0;
+ let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
+ let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
+
+ // Check that read_channel_monitors() returns an error if monitors/ is not a
+ // directory.
+ assert!(read_channel_monitors(&store, nodes[0].keys_manager, nodes[0].keys_manager).is_err());
+ }
+
+ #[test]
+ fn test_filesystem_store() {
+ // Create the nodes, giving them FilesystemStores for data stores.
+ let store_0 = FilesystemStore::new("test_filesystem_store_0".into());
+ let store_1 = FilesystemStore::new("test_filesystem_store_1".into());
+ do_test_store(&store_0, &store_1)
+ }
+
+ // Test that if the store's path to channel data is read-only, writing a
+ // monitor to it results in the store returning a PermanentFailure.
+ // Windows ignores the read-only flag for folders, so this test is Unix-only.
+ #[cfg(not(target_os = "windows"))]
+ #[test]
+ fn test_readonly_dir_perm_failure() {
+ let store = FilesystemStore::new("test_readonly_dir_perm_failure".into());
+ fs::create_dir_all(&store.get_data_dir()).unwrap();
+
+ // Set up a dummy channel and force close. This will produce a monitor
+ // that we can then use to test persistence.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+ let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+ let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+
+ // Set the store's directory to read-only, which should result in
+ // returning a permanent failure when we then attempt to persist a
+ // channel update.
+ let path = &store.get_data_dir();
+ let mut perms = fs::metadata(path).unwrap().permissions();
+ perms.set_readonly(true);
+ fs::set_permissions(path, perms).unwrap();
+
+ let test_txo = OutPoint {
+ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
+ index: 0
+ };
+ match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+ ChannelMonitorUpdateStatus::PermanentFailure => {},
+ _ => panic!("unexpected result from persisting new channel")
+ }
+
+ nodes[1].node.get_and_clear_pending_msg_events();
+ added_monitors.clear();
+ }
+
+ // Test that if a store's directory name is invalid, monitor persistence
+ // will fail.
+ #[cfg(target_os = "windows")]
+ #[test]
+ fn test_fail_on_open() {
+ // Set up a dummy channel and force close. This will produce a monitor
+ // that we can then use to test persistence.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
+ let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
+ let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+
+ // Create the store with an invalid directory name and test that the
+ // channel fails to open because the directories fail to be created. There
+ // don't seem to be invalid filename characters on Unix that Rust doesn't
+ // handle, hence why the test is Windows-only.
+ let store = FilesystemStore::new(":<>/".into());
+
+ let test_txo = OutPoint {
+ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
+ index: 0
+ };
+ match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+ ChannelMonitorUpdateStatus::PermanentFailure => {},
+ _ => panic!("unexpected result from persisting new channel")
+ }
+
+ nodes[1].node.get_and_clear_pending_msg_events();
+ added_monitors.clear();
+ }
+}
+
+#[cfg(ldk_bench)]
+/// Benches
+pub mod bench {
+ use criterion::Criterion;
+
+ /// Bench!
+ pub fn bench_sends(bench: &mut Criterion) {
+ let store_a = super::FilesystemStore::new("bench_filesystem_store_a".into());
+ let store_b = super::FilesystemStore::new("bench_filesystem_store_b".into());
+ lightning::ln::channelmanager::bench::bench_two_sends(
+ bench, "bench_filesystem_persisted_sends", store_a, store_b);
+ }
+}
-//! Utilities that handle persisting Rust-Lightning data to disk via standard filesystem APIs.
-
-// Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
+//! Provides utilities for LDK data persistence and retrieval.
+//
+// TODO: Prefix these with `rustdoc::` when we update our MSRV to be >= 1.52 to remove warnings.
#![deny(broken_intra_doc_links)]
#![deny(private_intra_doc_links)]
#[cfg(ldk_bench)] extern crate criterion;
-mod util;
-
-extern crate lightning;
-extern crate bitcoin;
-extern crate libc;
-
-use bitcoin::hash_types::{BlockHash, Txid};
-use bitcoin::hashes::hex::FromHex;
-use lightning::chain::channelmonitor::ChannelMonitor;
-use lightning::sign::{EntropySource, SignerProvider};
-use lightning::util::ser::{ReadableArgs, Writeable};
-use lightning::util::persist::KVStorePersister;
-use std::fs;
-use std::io::Cursor;
-use std::ops::Deref;
-use std::path::{Path, PathBuf};
-
-/// FilesystemPersister persists channel data on disk, where each channel's
-/// data is stored in a file named after its funding outpoint.
-///
-/// Warning: this module does the best it can with calls to persist data, but it
-/// can only guarantee that the data is passed to the drive. It is up to the
-/// drive manufacturers to do the actual persistence properly, which they often
-/// don't (especially on consumer-grade hardware). Therefore, it is up to the
-/// user to validate their entire storage stack, to ensure the writes are
-/// persistent.
-/// Corollary: especially when dealing with larger amounts of money, it is best
-/// practice to have multiple channel data backups and not rely only on one
-/// FilesystemPersister.
-pub struct FilesystemPersister {
- path_to_channel_data: String,
-}
-
-impl FilesystemPersister {
- /// Initialize a new FilesystemPersister and set the path to the individual channels'
- /// files.
- pub fn new(path_to_channel_data: String) -> Self {
- Self {
- path_to_channel_data,
- }
- }
-
- /// Get the directory which was provided when this persister was initialized.
- pub fn get_data_dir(&self) -> String {
- self.path_to_channel_data.clone()
- }
-
- /// Read `ChannelMonitor`s from disk.
- pub fn read_channelmonitors<ES: Deref, SP: Deref> (
- &self, entropy_source: ES, signer_provider: SP
- ) -> std::io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
- where
- ES::Target: EntropySource + Sized,
- SP::Target: SignerProvider + Sized
- {
- let mut path = PathBuf::from(&self.path_to_channel_data);
- path.push("monitors");
- if !Path::new(&path).exists() {
- return Ok(Vec::new());
- }
- let mut res = Vec::new();
- for file_option in fs::read_dir(path)? {
- let file = file_option.unwrap();
- let owned_file_name = file.file_name();
- let filename = owned_file_name.to_str()
- .ok_or_else(|| std::io::Error::new(std::io::ErrorKind::InvalidData,
- "File name is not a valid utf8 string"))?;
- if !filename.is_ascii() || filename.len() < 65 {
- return Err(std::io::Error::new(
- std::io::ErrorKind::InvalidData,
- "Invalid ChannelMonitor file name",
- ));
- }
- if filename.ends_with(".tmp") {
- // If we were in the middle of committing an new update and crashed, it should be
- // safe to ignore the update - we should never have returned to the caller and
- // irrevocably committed to the new state in any way.
- continue;
- }
-
- let txid: Txid = Txid::from_hex(filename.split_at(64).0)
- .map_err(|_| std::io::Error::new(
- std::io::ErrorKind::InvalidData,
- "Invalid tx ID in filename",
- ))?;
-
- let index: u16 = filename.split_at(65).1.parse()
- .map_err(|_| std::io::Error::new(
- std::io::ErrorKind::InvalidData,
- "Invalid tx index in filename",
- ))?;
+pub mod fs_store;
- let contents = fs::read(&file.path())?;
- let mut buffer = Cursor::new(&contents);
- match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(&mut buffer, (&*entropy_source, &*signer_provider)) {
- Ok((blockhash, channel_monitor)) => {
- if channel_monitor.get_funding_txo().0.txid != txid || channel_monitor.get_funding_txo().0.index != index {
- return Err(std::io::Error::new(std::io::ErrorKind::InvalidData,
- "ChannelMonitor was stored in the wrong file"));
- }
- res.push((blockhash, channel_monitor));
- }
- Err(e) => return Err(std::io::Error::new(
- std::io::ErrorKind::InvalidData,
- format!("Failed to deserialize ChannelMonitor: {}", e),
- ))
- }
- }
- Ok(res)
- }
-}
-
-impl KVStorePersister for FilesystemPersister {
- fn persist<W: Writeable>(&self, key: &str, object: &W) -> std::io::Result<()> {
- let mut dest_file = PathBuf::from(self.path_to_channel_data.clone());
- dest_file.push(key);
- util::write_to_file(dest_file, object)
- }
-}
+mod utils;
#[cfg(test)]
-mod tests {
- extern crate lightning;
- extern crate bitcoin;
- use crate::FilesystemPersister;
- use bitcoin::hashes::hex::FromHex;
- use bitcoin::Txid;
- use lightning::chain::ChannelMonitorUpdateStatus;
- use lightning::chain::chainmonitor::Persist;
- use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
- use lightning::chain::transaction::OutPoint;
- use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
- use lightning::events::{ClosureReason, MessageSendEventsProvider};
- use lightning::ln::functional_test_utils::*;
- use lightning::util::test_utils;
- use std::fs;
- #[cfg(target_os = "windows")]
- use {
- lightning::get_event_msg,
- lightning::ln::msgs::ChannelMessageHandler,
- };
-
- impl Drop for FilesystemPersister {
- fn drop(&mut self) {
- // We test for invalid directory names, so it's OK if directory removal
- // fails.
- match fs::remove_dir_all(&self.path_to_channel_data) {
- Err(e) => println!("Failed to remove test persister directory: {}", e),
- _ => {}
- }
- }
- }
-
- #[test]
- fn test_if_monitors_is_not_dir() {
- let persister = FilesystemPersister::new("test_monitors_is_not_dir".to_string());
-
- fs::create_dir_all(&persister.path_to_channel_data).unwrap();
- let mut path = std::path::PathBuf::from(&persister.path_to_channel_data);
- path.push("monitors");
- fs::File::create(path).unwrap();
-
- let chanmon_cfgs = create_chanmon_cfgs(1);
- let mut node_cfgs = create_node_cfgs(1, &chanmon_cfgs);
- let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister, node_cfgs[0].keys_manager);
- node_cfgs[0].chain_monitor = chain_mon_0;
- let node_chanmgrs = create_node_chanmgrs(1, &node_cfgs, &[None]);
- let nodes = create_network(1, &node_cfgs, &node_chanmgrs);
-
- // Check that read_channelmonitors() returns error if monitors/ is not a
- // directory.
- assert!(persister.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).is_err());
- }
-
- // Integration-test the FilesystemPersister. Test relaying a few payments
- // and check that the persisted data is updated the appropriate number of
- // times.
- #[test]
- fn test_filesystem_persister() {
- // Create the nodes, giving them FilesystemPersisters for data persisters.
- let persister_0 = FilesystemPersister::new("test_filesystem_persister_0".to_string());
- let persister_1 = FilesystemPersister::new("test_filesystem_persister_1".to_string());
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &persister_0, node_cfgs[0].keys_manager);
- let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, &persister_1, node_cfgs[1].keys_manager);
- node_cfgs[0].chain_monitor = chain_mon_0;
- node_cfgs[1].chain_monitor = chain_mon_1;
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
- // Check that the persisted channel data is empty before any channels are
- // open.
- let mut persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
- assert_eq!(persisted_chan_data_0.len(), 0);
- let mut persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
- assert_eq!(persisted_chan_data_1.len(), 0);
-
- // Helper to make sure the channel is on the expected update ID.
- macro_rules! check_persisted_data {
- ($expected_update_id: expr) => {
- persisted_chan_data_0 = persister_0.read_channelmonitors(nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
- assert_eq!(persisted_chan_data_0.len(), 1);
- for (_, mon) in persisted_chan_data_0.iter() {
- assert_eq!(mon.get_latest_update_id(), $expected_update_id);
- }
- persisted_chan_data_1 = persister_1.read_channelmonitors(nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
- assert_eq!(persisted_chan_data_1.len(), 1);
- for (_, mon) in persisted_chan_data_1.iter() {
- assert_eq!(mon.get_latest_update_id(), $expected_update_id);
- }
- }
- }
-
- // Create some initial channel and check that a channel was persisted.
- let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
- check_persisted_data!(0);
-
- // Send a few payments and make sure the monitors are updated to the latest.
- send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
- check_persisted_data!(5);
- send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
- check_persisted_data!(10);
-
- // Force close because cooperative close doesn't result in any persisted
- // updates.
- nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
- check_closed_broadcast!(nodes[0], true);
- check_added_monitors!(nodes[0], 1);
-
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
- assert_eq!(node_txn.len(), 1);
-
- connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
- check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
- check_added_monitors!(nodes[1], 1);
-
- // Make sure everything is persisted as expected after close.
- check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
- }
-
- // Test that if the persister's path to channel data is read-only, writing a
- // monitor to it results in the persister returning a PermanentFailure.
- // Windows ignores the read-only flag for folders, so this test is Unix-only.
- #[cfg(not(target_os = "windows"))]
- #[test]
- fn test_readonly_dir_perm_failure() {
- let persister = FilesystemPersister::new("test_readonly_dir_perm_failure".to_string());
- fs::create_dir_all(&persister.path_to_channel_data).unwrap();
-
- // Set up a dummy channel and force close. This will produce a monitor
- // that we can then use to test persistence.
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
- let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
- let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-
- // Set the persister's directory to read-only, which should result in
- // returning a permanent failure when we then attempt to persist a
- // channel update.
- let path = &persister.path_to_channel_data;
- let mut perms = fs::metadata(path).unwrap().permissions();
- perms.set_readonly(true);
- fs::set_permissions(path, perms).unwrap();
-
- let test_txo = OutPoint {
- txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
- index: 0
- };
- match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
- ChannelMonitorUpdateStatus::PermanentFailure => {},
- _ => panic!("unexpected result from persisting new channel")
- }
-
- nodes[1].node.get_and_clear_pending_msg_events();
- added_monitors.clear();
- }
-
- // Test that if a persister's directory name is invalid, monitor persistence
- // will fail.
- #[cfg(target_os = "windows")]
- #[test]
- fn test_fail_on_open() {
- // Set up a dummy channel and force close. This will produce a monitor
- // that we can then use to test persistence.
- let chanmon_cfgs = create_chanmon_cfgs(2);
- let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
- let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
- nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
- let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
- let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
- let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-
- // Create the persister with an invalid directory name and test that the
- // channel fails to open because the directories fail to be created. There
- // don't seem to be invalid filename characters on Unix that Rust doesn't
- // handle, hence why the test is Windows-only.
- let persister = FilesystemPersister::new(":<>/".to_string());
-
- let test_txo = OutPoint {
- txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
- index: 0
- };
- match persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
- ChannelMonitorUpdateStatus::PermanentFailure => {},
- _ => panic!("unexpected result from persisting new channel")
- }
-
- nodes[1].node.get_and_clear_pending_msg_events();
- added_monitors.clear();
- }
-}
-
-#[cfg(ldk_bench)]
-/// Benches
-pub mod bench {
- use criterion::Criterion;
-
- /// Bench!
- pub fn bench_sends(bench: &mut Criterion) {
- let persister_a = super::FilesystemPersister::new("bench_filesystem_persister_a".to_string());
- let persister_b = super::FilesystemPersister::new("bench_filesystem_persister_b".to_string());
- lightning::ln::channelmanager::bench::bench_two_sends(
- bench, "bench_filesystem_persisted_sends", persister_a, persister_b);
- }
-}
+mod test_utils;
--- /dev/null
+use lightning::util::persist::{KVStore, KVSTORE_NAMESPACE_KEY_MAX_LEN, read_channel_monitors};
+use lightning::ln::functional_test_utils::{connect_block, create_announced_chan_between_nodes,
+ create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs,
+ send_payment};
+use lightning::chain::channelmonitor::CLOSED_CHANNEL_UPDATE_ID;
+use lightning::util::test_utils;
+use lightning::{check_closed_broadcast, check_closed_event, check_added_monitors};
+use lightning::events::ClosureReason;
+
+use std::panic::RefUnwindSafe;
+
+pub(crate) fn do_read_write_remove_list_persist<K: KVStore + RefUnwindSafe>(kv_store: &K) {
+ let data = [42u8; 32];
+
+ let namespace = "testspace";
+ let sub_namespace = "testsubspace";
+ let key = "testkey";
+
+ // Test the basic KVStore operations.
+ kv_store.write(namespace, sub_namespace, key, &data).unwrap();
+
+ // Test that an empty namespace/sub_namespace is allowed, but that an empty key, or an
+ // empty namespace combined with a non-empty sub-namespace, is rejected.
+ kv_store.write("", "", key, &data).unwrap();
+ let res = std::panic::catch_unwind(|| kv_store.write("", sub_namespace, key, &data));
+ assert!(res.is_err());
+ let res = std::panic::catch_unwind(|| kv_store.write(namespace, sub_namespace, "", &data));
+ assert!(res.is_err());
+
+ let listed_keys = kv_store.list(namespace, sub_namespace).unwrap();
+ assert_eq!(listed_keys.len(), 1);
+ assert_eq!(listed_keys[0], key);
+
+ let read_data = kv_store.read(namespace, sub_namespace, key).unwrap();
+ assert_eq!(data, &*read_data);
+
+ kv_store.remove(namespace, sub_namespace, key, false).unwrap();
+
+ let listed_keys = kv_store.list(namespace, sub_namespace).unwrap();
+ assert_eq!(listed_keys.len(), 0);
+
+ // Ensure we have no issue operating with namespace/sub_namespace/key each being KVSTORE_NAMESPACE_KEY_MAX_LEN characters long.
+ let max_chars: String = std::iter::repeat('A').take(KVSTORE_NAMESPACE_KEY_MAX_LEN).collect();
+ kv_store.write(&max_chars, &max_chars, &max_chars, &data).unwrap();
+
+ let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap();
+ assert_eq!(listed_keys.len(), 1);
+ assert_eq!(listed_keys[0], max_chars);
+
+ let read_data = kv_store.read(&max_chars, &max_chars, &max_chars).unwrap();
+ assert_eq!(data, &*read_data);
+
+ kv_store.remove(&max_chars, &max_chars, &max_chars, false).unwrap();
+
+ let listed_keys = kv_store.list(&max_chars, &max_chars).unwrap();
+ assert_eq!(listed_keys.len(), 0);
+}
+
+// Integration-test the given KVStore implementation. Test relaying a few payments and check that
+// the persisted data is updated the appropriate number of times.
+pub(crate) fn do_test_store<K: KVStore>(store_0: &K, store_1: &K) {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let chain_mon_0 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, store_0, node_cfgs[0].keys_manager);
+ let chain_mon_1 = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[1].chain_source), &chanmon_cfgs[1].tx_broadcaster, &chanmon_cfgs[1].logger, &chanmon_cfgs[1].fee_estimator, store_1, node_cfgs[1].keys_manager);
+ node_cfgs[0].chain_monitor = chain_mon_0;
+ node_cfgs[1].chain_monitor = chain_mon_1;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ // Check that the persisted channel data is empty before any channels are
+ // open.
+ let mut persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+ assert_eq!(persisted_chan_data_0.len(), 0);
+ let mut persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+ assert_eq!(persisted_chan_data_1.len(), 0);
+
+ // Helper to make sure the channel is on the expected update ID.
+ macro_rules! check_persisted_data {
+ ($expected_update_id: expr) => {
+ persisted_chan_data_0 = read_channel_monitors(store_0, nodes[0].keys_manager, nodes[0].keys_manager).unwrap();
+ assert_eq!(persisted_chan_data_0.len(), 1);
+ for (_, mon) in persisted_chan_data_0.iter() {
+ assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+ }
+ persisted_chan_data_1 = read_channel_monitors(store_1, nodes[1].keys_manager, nodes[1].keys_manager).unwrap();
+ assert_eq!(persisted_chan_data_1.len(), 1);
+ for (_, mon) in persisted_chan_data_1.iter() {
+ assert_eq!(mon.get_latest_update_id(), $expected_update_id);
+ }
+ }
+ }
+
+ // Create some initial channel and check that a channel was persisted.
+ let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
+ check_persisted_data!(0);
+
+ // Send a few payments and make sure the monitors are updated to the latest.
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ check_persisted_data!(5);
+ send_payment(&nodes[1], &vec!(&nodes[0])[..], 4000000);
+ check_persisted_data!(10);
+
+ // Force close because cooperative close doesn't result in any persisted
+ // updates.
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 1);
+
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+
+ connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
+ check_closed_broadcast!(nodes[1], true);
+ check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
+ check_added_monitors!(nodes[1], 1);
+
+ // Make sure everything is persisted as expected after close.
+ check_persisted_data!(CLOSED_CHANNEL_UPDATE_ID);
+}
+++ /dev/null
-#[cfg(target_os = "windows")]
-extern crate winapi;
-
-use std::fs;
-use std::path::PathBuf;
-use std::io::BufWriter;
-
-#[cfg(not(target_os = "windows"))]
-use std::os::unix::io::AsRawFd;
-
-use lightning::util::ser::Writeable;
-
-#[cfg(target_os = "windows")]
-use {
- std::ffi::OsStr,
- std::os::windows::ffi::OsStrExt
-};
-
-#[cfg(target_os = "windows")]
-macro_rules! call {
- ($e: expr) => (
- if $e != 0 {
- return Ok(())
- } else {
- return Err(std::io::Error::last_os_error())
- }
- )
-}
-
-#[cfg(target_os = "windows")]
-fn path_to_windows_str<T: AsRef<OsStr>>(path: T) -> Vec<winapi::shared::ntdef::WCHAR> {
- path.as_ref().encode_wide().chain(Some(0)).collect()
-}
-
-#[allow(bare_trait_objects)]
-pub(crate) fn write_to_file<W: Writeable>(dest_file: PathBuf, data: &W) -> std::io::Result<()> {
- let mut tmp_file = dest_file.clone();
- tmp_file.set_extension("tmp");
-
- let parent_directory = dest_file.parent().unwrap();
- fs::create_dir_all(parent_directory)?;
- // Do a crazy dance with lots of fsync()s to be overly cautious here...
- // We never want to end up in a state where we've lost the old data, or end up using the
- // old data on power loss after we've returned.
- // The way to atomically write a file on Unix platforms is:
- // open(tmpname), write(tmpfile), fsync(tmpfile), close(tmpfile), rename(), fsync(dir)
- {
- // Note that going by rust-lang/rust@d602a6b, on MacOS it is only safe to use
- // rust stdlib 1.36 or higher.
- let mut buf = BufWriter::new(fs::File::create(&tmp_file)?);
- data.write(&mut buf)?;
- buf.into_inner()?.sync_all()?;
- }
- // Fsync the parent directory on Unix.
- #[cfg(not(target_os = "windows"))]
- {
- fs::rename(&tmp_file, &dest_file)?;
- let dir_file = fs::OpenOptions::new().read(true).open(parent_directory)?;
- unsafe { libc::fsync(dir_file.as_raw_fd()); }
- }
- #[cfg(target_os = "windows")]
- {
- if dest_file.exists() {
- unsafe {winapi::um::winbase::ReplaceFileW(
- path_to_windows_str(dest_file).as_ptr(), path_to_windows_str(tmp_file).as_ptr(), std::ptr::null(),
- winapi::um::winbase::REPLACEFILE_IGNORE_MERGE_ERRORS,
- std::ptr::null_mut() as *mut winapi::ctypes::c_void,
- std::ptr::null_mut() as *mut winapi::ctypes::c_void
- )};
- } else {
- call!(unsafe {winapi::um::winbase::MoveFileExW(
- path_to_windows_str(tmp_file).as_ptr(), path_to_windows_str(dest_file).as_ptr(),
- winapi::um::winbase::MOVEFILE_WRITE_THROUGH | winapi::um::winbase::MOVEFILE_REPLACE_EXISTING
- )});
- }
- }
- Ok(())
-}
-
-#[cfg(test)]
-mod tests {
- use lightning::util::ser::{Writer, Writeable};
-
- use super::{write_to_file};
- use std::fs;
- use std::io;
- use std::path::PathBuf;
-
- struct TestWriteable{}
- impl Writeable for TestWriteable {
- fn write<W: Writer>(&self, writer: &mut W) -> Result<(), std::io::Error> {
- writer.write_all(&[42; 1])
- }
- }
-
- // Test that if the persister's path to channel data is read-only, writing
- // data to it fails. Windows ignores the read-only flag for folders, so this
- // test is Unix-only.
- #[cfg(not(target_os = "windows"))]
- #[test]
- fn test_readonly_dir() {
- let test_writeable = TestWriteable{};
- let filename = "test_readonly_dir_persister_filename".to_string();
- let path = "test_readonly_dir_persister_dir";
- fs::create_dir_all(path).unwrap();
- let mut perms = fs::metadata(path).unwrap().permissions();
- perms.set_readonly(true);
- fs::set_permissions(path, perms).unwrap();
- let mut dest_file = PathBuf::from(path);
- dest_file.push(filename);
- match write_to_file(dest_file, &test_writeable) {
- Err(e) => assert_eq!(e.kind(), io::ErrorKind::PermissionDenied),
- _ => panic!("Unexpected error message")
- }
- }
-
- // Test failure to rename in the process of atomically creating a channel
- // monitor's file. We induce this failure by making the `tmp` file a
- // directory.
- // Explanation: given "from" = the file being renamed, "to" = the destination
- // file that already exists: Unix should fail because if "from" is a file,
- // then "to" is also required to be a file.
- // TODO: ideally try to make this work on Windows again
- #[cfg(not(target_os = "windows"))]
- #[test]
- fn test_rename_failure() {
- let test_writeable = TestWriteable{};
- let filename = "test_rename_failure_filename";
- let path = "test_rename_failure_dir";
- let mut dest_file = PathBuf::from(path);
- dest_file.push(filename);
- // Create the channel data file and make it a directory.
- fs::create_dir_all(dest_file.clone()).unwrap();
- match write_to_file(dest_file, &test_writeable) {
- Err(e) => assert_eq!(e.raw_os_error(), Some(libc::EISDIR)),
- _ => panic!("Unexpected Ok(())")
- }
- fs::remove_dir_all(path).unwrap();
- }
-
- #[test]
- fn test_diskwriteable_failure() {
- struct FailingWriteable {}
- impl Writeable for FailingWriteable {
- fn write<W: Writer>(&self, _writer: &mut W) -> Result<(), std::io::Error> {
- Err(std::io::Error::new(std::io::ErrorKind::Other, "expected failure"))
- }
- }
-
- let filename = "test_diskwriteable_failure";
- let path = "test_diskwriteable_failure_dir";
- let test_writeable = FailingWriteable{};
- let mut dest_file = PathBuf::from(path);
- dest_file.push(filename);
- match write_to_file(dest_file, &test_writeable) {
- Err(e) => {
- assert_eq!(e.kind(), std::io::ErrorKind::Other);
- assert_eq!(e.get_ref().unwrap().to_string(), "expected failure");
- },
- _ => panic!("unexpected result")
- }
- fs::remove_dir_all(path).unwrap();
- }
-
- // Test failure to create the temporary file in the persistence process.
- // We induce this failure by having the temp file already exist and be a
- // directory.
- #[test]
- fn test_tmp_file_creation_failure() {
- let test_writeable = TestWriteable{};
- let filename = "test_tmp_file_creation_failure_filename".to_string();
- let path = "test_tmp_file_creation_failure_dir";
- let mut dest_file = PathBuf::from(path);
- dest_file.push(filename);
- let mut tmp_file = dest_file.clone();
- tmp_file.set_extension("tmp");
- fs::create_dir_all(tmp_file).unwrap();
- match write_to_file(dest_file, &test_writeable) {
- Err(e) => {
- #[cfg(not(target_os = "windows"))]
- assert_eq!(e.raw_os_error(), Some(libc::EISDIR));
- #[cfg(target_os = "windows")]
- assert_eq!(e.kind(), io::ErrorKind::PermissionDenied);
- }
- _ => panic!("Unexpected error message")
- }
- }
-}
--- /dev/null
+use lightning::util::persist::{KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN};
+use lightning::util::string::PrintableString;
+
+
+pub(crate) fn is_valid_kvstore_str(key: &str) -> bool {
+ key.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN && key.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c))
+}
+
+pub(crate) fn check_namespace_key_validity(namespace: &str, sub_namespace: &str, key: Option<&str>, operation: &str) -> Result<(), std::io::Error> {
+ if let Some(key) = key {
+ if key.is_empty() {
+ debug_assert!(false, "Failed to {} {}/{}/{}: key may not be empty.", operation,
+ PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+ let msg = format!("Failed to {} {}/{}/{}: key may not be empty.", operation,
+ PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+ }
+
+ if namespace.is_empty() && !sub_namespace.is_empty() {
+ debug_assert!(false,
+ "Failed to {} {}/{}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+ operation,
+ PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+ let msg = format!(
+ "Failed to {} {}/{}/{}: namespace may not be empty if a non-empty sub-namespace is given.", operation,
+ PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+ }
+
+ if !is_valid_kvstore_str(namespace) || !is_valid_kvstore_str(sub_namespace) || !is_valid_kvstore_str(key) {
+ debug_assert!(false, "Failed to {} {}/{}/{}: namespace, sub-namespace, and key must be valid.",
+ operation,
+ PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+ let msg = format!("Failed to {} {}/{}/{}: namespace, sub-namespace, and key must be valid.",
+ operation,
+ PrintableString(namespace), PrintableString(sub_namespace), PrintableString(key));
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+ }
+ } else {
+ if namespace.is_empty() && !sub_namespace.is_empty() {
+ debug_assert!(false,
+ "Failed to {} {}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+ operation, PrintableString(namespace), PrintableString(sub_namespace));
+ let msg = format!(
+ "Failed to {} {}/{}: namespace may not be empty if a non-empty sub-namespace is given.",
+ operation, PrintableString(namespace), PrintableString(sub_namespace));
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+ }
+ if !is_valid_kvstore_str(namespace) || !is_valid_kvstore_str(sub_namespace) {
+ debug_assert!(false, "Failed to {} {}/{}: namespace and sub-namespace must be valid.",
+ operation, PrintableString(namespace), PrintableString(sub_namespace));
+ let msg = format!("Failed to {} {}/{}: namespace and sub-namespace must be valid.",
+ operation, PrintableString(namespace), PrintableString(sub_namespace));
+ return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
+ }
+ }
+
+ Ok(())
+}
use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
-use crate::sign::EntropySource;
use crate::ln::msgs::DecodeError;
+use crate::offers::invoice::BlindedPayInfo;
+use crate::sign::EntropySource;
use crate::util::ser::{Readable, Writeable, Writer};
use crate::io;
})
}
- /// Create a blinded path for a payment, to be forwarded along `path`. The last node
- /// in `path` will be the destination node.
+ /// Create a blinded path for a payment, to be forwarded along `intermediate_nodes`.
+ ///
+ /// Errors if:
+ /// * a provided node id is invalid
+ /// * [`BlindedPayInfo`] calculation results in an integer overflow
+ /// * any unknown features are required in the provided [`ForwardTlvs`]
///
- /// Errors if `path` is empty or a node id in `path` is invalid.
+ /// [`ForwardTlvs`]: crate::blinded_path::payment::ForwardTlvs
// TODO: make all payloads the same size with padding + add dummy hops
pub fn new_for_payment<ES: EntropySource, T: secp256k1::Signing + secp256k1::Verification>(
- intermediate_nodes: &[(PublicKey, payment::ForwardTlvs)], payee_node_id: PublicKey,
- payee_tlvs: payment::ReceiveTlvs, entropy_source: &ES, secp_ctx: &Secp256k1<T>
- ) -> Result<Self, ()> {
+ intermediate_nodes: &[payment::ForwardNode], payee_node_id: PublicKey,
+ payee_tlvs: payment::ReceiveTlvs, htlc_maximum_msat: u64, entropy_source: &ES,
+ secp_ctx: &Secp256k1<T>
+ ) -> Result<(BlindedPayInfo, Self), ()> {
+ // Only the public half of this ephemeral secret is ever exposed, as `blinding_point`;
+ // the secret itself stays local to this function.
let blinding_secret_bytes = entropy_source.get_secure_random_bytes();
let blinding_secret = SecretKey::from_slice(&blinding_secret_bytes[..]).expect("RNG is busted");
- Ok(BlindedPath {
- introduction_node_id: intermediate_nodes.first().map_or(payee_node_id, |n| n.0),
+ // Aggregate the hops' fee/CLTV/HTLC-limit parameters up front so the path's payinfo
+ // can be returned alongside the path itself.
+ let blinded_payinfo = payment::compute_payinfo(intermediate_nodes, &payee_tlvs, htlc_maximum_msat)?;
+ // With no intermediate hops, the payee is itself the introduction node.
+ Ok((blinded_payinfo, BlindedPath {
+ introduction_node_id: intermediate_nodes.first().map_or(payee_node_id, |n| n.node_id),
blinding_point: PublicKey::from_secret_key(secp_ctx, &blinding_secret),
blinded_hops: payment::blinded_hops(
secp_ctx, intermediate_nodes, payee_node_id, payee_tlvs, &blinding_secret
).map_err(|_| ())?,
- })
+ }))
}
}
use crate::ln::PaymentSecret;
use crate::ln::features::BlindedHopFeatures;
use crate::ln::msgs::DecodeError;
+use crate::offers::invoice::BlindedPayInfo;
use crate::prelude::*;
use crate::util::ser::{Readable, Writeable, Writer};
+use core::convert::TryFrom;
+
+/// An intermediate node, its outbound channel, and relay parameters.
+#[derive(Clone, Debug)]
+pub struct ForwardNode {
+ /// The TLVs for this node's [`BlindedHop`], where the fee parameters contained within are also
+ /// used for [`BlindedPayInfo`] construction.
+ pub tlvs: ForwardTlvs,
+ /// This node's pubkey.
+ pub node_id: PublicKey,
+ /// The maximum value, in msat, that may be accepted by this node.
+ ///
+ /// Used as this node's cap when aggregating the path-wide maximum HTLC during
+ /// [`BlindedPayInfo`] construction.
+ pub htlc_maximum_msat: u64,
+}
+
/// Data to construct a [`BlindedHop`] for forwarding a payment.
+// NOTE(review): fields made `pub` — presumably so users can build paths via
+// `BlindedPath::new_for_payment`; confirm the intended public API surface.
+#[derive(Clone, Debug)]
pub struct ForwardTlvs {
/// The short channel id this payment should be forwarded out over.
- short_channel_id: u64,
+ pub short_channel_id: u64,
/// Payment parameters for relaying over [`Self::short_channel_id`].
- payment_relay: PaymentRelay,
+ pub payment_relay: PaymentRelay,
/// Payment constraints for relaying over [`Self::short_channel_id`].
- payment_constraints: PaymentConstraints,
+ pub payment_constraints: PaymentConstraints,
/// Supported and required features when relaying a payment onion containing this object's
/// corresponding [`BlindedHop::encrypted_payload`].
///
/// [`BlindedHop::encrypted_payload`]: crate::blinded_path::BlindedHop::encrypted_payload
- features: BlindedHopFeatures,
+ pub features: BlindedHopFeatures,
}
/// Data to construct a [`BlindedHop`] for receiving a payment. This payload is custom to LDK and
/// may not be valid if received by another lightning implementation.
+#[derive(Clone, Debug)]
pub struct ReceiveTlvs {
/// Used to authenticate the sender of a payment to the receiver and tie MPP HTLCs together.
- payment_secret: PaymentSecret,
+ pub payment_secret: PaymentSecret,
/// Constraints for the receiver of this payment.
+ // This node's `htlc_minimum_msat` also acts as the final floor in `compute_payinfo`.
- payment_constraints: PaymentConstraints,
+ pub payment_constraints: PaymentConstraints,
}
/// Data to construct a [`BlindedHop`] for sending a payment over.
/// Parameters for relaying over a given [`BlindedHop`].
///
/// [`BlindedHop`]: crate::blinded_path::BlindedHop
+#[derive(Clone, Debug)]
pub struct PaymentRelay {
+ // The fee and CLTV values below feed `compute_payinfo`'s path-wide aggregation.
/// Number of blocks subtracted from an incoming HTLC's `cltv_expiry` for this [`BlindedHop`].
- ///
- ///[`BlindedHop`]: crate::blinded_path::BlindedHop
pub cltv_expiry_delta: u16,
/// Liquidity fee charged (in millionths of the amount transferred) for relaying a payment over
/// this [`BlindedHop`], (i.e., 10,000 is 1%).
- ///
- ///[`BlindedHop`]: crate::blinded_path::BlindedHop
pub fee_proportional_millionths: u32,
/// Base fee charged (in millisatoshi) for relaying a payment over this [`BlindedHop`].
- ///
- ///[`BlindedHop`]: crate::blinded_path::BlindedHop
pub fee_base_msat: u32,
}
/// Constraints for relaying over a given [`BlindedHop`].
///
/// [`BlindedHop`]: crate::blinded_path::BlindedHop
+#[derive(Clone, Debug)]
pub struct PaymentConstraints {
/// The maximum total CLTV delta that is acceptable when relaying a payment over this
/// [`BlindedHop`].
- ///
- ///[`BlindedHop`]: crate::blinded_path::BlindedHop
pub max_cltv_expiry: u32,
- /// The minimum value, in msat, that may be relayed over this [`BlindedHop`].
+ /// The minimum value, in msat, that may be accepted by the node corresponding to this
+ /// [`BlindedHop`].
+ // `compute_payinfo` uses this as the per-node floor when aggregating the path minimum.
pub htlc_minimum_msat: u64,
}
-impl_writeable_tlv_based!(ForwardTlvs, {
- (2, short_channel_id, required),
- (10, payment_relay, required),
- (12, payment_constraints, required),
- (14, features, required),
-});
+impl Writeable for ForwardTlvs {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ // NOTE(review): uses `encode_tlv_stream!` directly rather than
+ // `impl_writeable_tlv_based!` — presumably to emit a bare TLV stream without a
+ // length prefix, as required for blinded payloads; confirm against the encoder.
+ encode_tlv_stream!(w, {
+ (2, self.short_channel_id, required),
+ (10, self.payment_relay, required),
+ (12, self.payment_constraints, required),
+ (14, self.features, required)
+ });
+ Ok(())
+ }
+}
-impl_writeable_tlv_based!(ReceiveTlvs, {
- (12, payment_constraints, required),
- (65536, payment_secret, required),
-});
+impl Writeable for ReceiveTlvs {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ // NOTE(review): type 65536 lies outside the u16 TLV-type range used on the wire —
+ // presumably deliberate so the payment secret never collides with spec-defined
+ // forward TLVs; confirm intent.
+ encode_tlv_stream!(w, {
+ (12, self.payment_constraints, required),
+ (65536, self.payment_secret, required)
+ });
+ Ok(())
+ }
+}
impl<'a> Writeable for BlindedPaymentTlvsRef<'a> {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
/// Construct blinded payment hops for the given `intermediate_nodes` and payee info.
pub(super) fn blinded_hops<T: secp256k1::Signing + secp256k1::Verification>(
- secp_ctx: &Secp256k1<T>, intermediate_nodes: &[(PublicKey, ForwardTlvs)],
+ secp_ctx: &Secp256k1<T>, intermediate_nodes: &[ForwardNode],
payee_node_id: PublicKey, payee_tlvs: ReceiveTlvs, session_priv: &SecretKey
) -> Result<Vec<BlindedHop>, secp256k1::Error> {
+ // Build two parallel, one-to-one iterators: each intermediate node's pubkey paired with
+ // its Forward TLVs, terminated by the payee's pubkey paired with the Receive TLVs.
- let pks = intermediate_nodes.iter().map(|(pk, _)| pk)
+ let pks = intermediate_nodes.iter().map(|node| &node.node_id)
.chain(core::iter::once(&payee_node_id));
- let tlvs = intermediate_nodes.iter().map(|(_, tlvs)| BlindedPaymentTlvsRef::Forward(tlvs))
+ let tlvs = intermediate_nodes.iter().map(|node| BlindedPaymentTlvsRef::Forward(&node.tlvs))
.chain(core::iter::once(BlindedPaymentTlvsRef::Receive(&payee_tlvs)));
utils::construct_blinded_hops(secp_ctx, pks, tlvs, session_priv)
}
+/// Given the amount a node receives (`inbound_amt_msat`) and its relay fee parameters,
+/// computes the amount it should forward on, such that the forwarded amount plus this
+/// node's fee on it exactly consumes the inbound amount (see the `debug_assert_eq!` below).
+///
+/// `None` if underflow occurs.
+fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option<u64> {
+ // Widen to u128 so the proportional-fee multiplications below cannot overflow.
+ let inbound_amt = inbound_amt_msat as u128;
+ let base = payment_relay.fee_base_msat as u128;
+ let prop = payment_relay.fee_proportional_millionths as u128;
+
+ let post_base_fee_inbound_amt =
+ if let Some(amt) = inbound_amt.checked_sub(base) { amt } else { return None };
+ // Invert `inbound = amt + base + amt * prop / 1_000_000` for `amt`, rounding up.
+ let mut amt_to_forward =
+ (post_base_fee_inbound_amt * 1_000_000 + 1_000_000 + prop - 1) / (prop + 1_000_000);
+
+ let fee = ((amt_to_forward * prop) / 1_000_000) + base;
+ if inbound_amt - fee < amt_to_forward {
+ // Rounding up the forwarded amount resulted in underpaying this node, so take an extra 1 msat
+ // in fee to compensate.
+ amt_to_forward -= 1;
+ }
+ debug_assert_eq!(amt_to_forward + fee, inbound_amt);
+ u64::try_from(amt_to_forward).ok()
+}
+
+/// Aggregates the per-hop fee, CLTV, and HTLC-limit parameters into a single
+/// [`BlindedPayInfo`] that advertises the whole blinded path as if it were one hop.
+///
+/// Returns `Err(())` on integer overflow, if any hop requires unknown features, or if the
+/// aggregated maximum HTLC ends up below the aggregated minimum.
+pub(super) fn compute_payinfo(
+ intermediate_nodes: &[ForwardNode], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64
+) -> Result<BlindedPayInfo, ()> {
+ // Fold fees/CLTV back-to-front: each earlier hop's fee must also cover the fees charged
+ // by every hop after it, so iterate from the payee towards the introduction node.
+ let mut curr_base_fee: u64 = 0;
+ let mut curr_prop_mil: u64 = 0;
+ let mut cltv_expiry_delta: u16 = 0;
+ for tlvs in intermediate_nodes.iter().rev().map(|n| &n.tlvs) {
+ // In the future, we'll want to take the intersection of all supported features for the
+ // `BlindedPayInfo`, but there are no features in that context right now.
+ if tlvs.features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { return Err(()) }
+
+ let next_base_fee = tlvs.payment_relay.fee_base_msat as u64;
+ let next_prop_mil = tlvs.payment_relay.fee_proportional_millionths as u64;
+ // Use integer arithmetic to compute `ceil(a/b)` as `(a+b-1)/b`
+ // ((curr_base_fee * (1_000_000 + next_prop_mil)) / 1_000_000) + next_base_fee
+ curr_base_fee = curr_base_fee.checked_mul(1_000_000 + next_prop_mil)
+ .and_then(|f| f.checked_add(1_000_000 - 1))
+ .map(|f| f / 1_000_000)
+ .and_then(|f| f.checked_add(next_base_fee))
+ .ok_or(())?;
+ // ceil(((curr_prop_mil + 1_000_000) * (next_prop_mil + 1_000_000)) / 1_000_000) - 1_000_000
+ curr_prop_mil = curr_prop_mil.checked_add(1_000_000)
+ .and_then(|f1| next_prop_mil.checked_add(1_000_000).and_then(|f2| f2.checked_mul(f1)))
+ .and_then(|f| f.checked_add(1_000_000 - 1))
+ .map(|f| f / 1_000_000)
+ .and_then(|f| f.checked_sub(1_000_000))
+ .ok_or(())?;
+
+ // A u16 overflow here means the path is unusable anyway; error rather than wrap.
+ cltv_expiry_delta = cltv_expiry_delta.checked_add(tlvs.payment_relay.cltv_expiry_delta).ok_or(())?;
+ }
+
+ let mut htlc_minimum_msat: u64 = 1;
+ let mut htlc_maximum_msat: u64 = 21_000_000 * 100_000_000 * 1_000; // Total bitcoin supply
+ for node in intermediate_nodes.iter() {
+ // The min htlc for an intermediate node is that node's min minus the fees charged by all of the
+ // following hops for forwarding that min, since that fee amount will automatically be included
+ // in the amount that this node receives and contribute towards reaching its min.
+ htlc_minimum_msat = amt_to_forward_msat(
+ core::cmp::max(node.tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat),
+ &node.tlvs.payment_relay
+ ).unwrap_or(1); // If underflow occurs, we definitely reached this node's min
+ htlc_maximum_msat = amt_to_forward_msat(
+ core::cmp::min(node.htlc_maximum_msat, htlc_maximum_msat), &node.tlvs.payment_relay
+ ).ok_or(())?; // If underflow occurs, we cannot send to this hop without exceeding their max
+ }
+ // The payee's own constraints bound the final aggregated values.
+ htlc_minimum_msat = core::cmp::max(
+ payee_tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat
+ );
+ htlc_maximum_msat = core::cmp::min(payee_htlc_maximum_msat, htlc_maximum_msat);
+
+ if htlc_maximum_msat < htlc_minimum_msat { return Err(()) }
+ Ok(BlindedPayInfo {
+ fee_base_msat: u32::try_from(curr_base_fee).map_err(|_| ())?,
+ fee_proportional_millionths: u32::try_from(curr_prop_mil).map_err(|_| ())?,
+ cltv_expiry_delta,
+ htlc_minimum_msat,
+ htlc_maximum_msat,
+ features: BlindedHopFeatures::empty(),
+ })
+}
+
impl_writeable_msg!(PaymentRelay, {
cltv_expiry_delta,
fee_proportional_millionths,
max_cltv_expiry,
htlc_minimum_msat
}, {});
+
+#[cfg(test)]
+mod tests {
+ use bitcoin::secp256k1::PublicKey;
+ use crate::blinded_path::payment::{ForwardNode, ForwardTlvs, ReceiveTlvs, PaymentConstraints, PaymentRelay};
+ use crate::ln::PaymentSecret;
+ use crate::ln::features::BlindedHopFeatures;
+
+ #[test]
+ fn compute_payinfo() {
+ // Taken from the spec example for aggregating blinded payment info. See
+ // https://github.com/lightning/bolts/blob/master/proposals/route-blinding.md#blinded-payments
+ let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
+ let intermediate_nodes = vec![ForwardNode {
+ node_id: dummy_pk,
+ tlvs: ForwardTlvs {
+ short_channel_id: 0,
+ payment_relay: PaymentRelay {
+ cltv_expiry_delta: 144,
+ fee_proportional_millionths: 500,
+ fee_base_msat: 100,
+ },
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 100,
+ },
+ features: BlindedHopFeatures::empty(),
+ },
+ htlc_maximum_msat: u64::max_value(),
+ }, ForwardNode {
+ node_id: dummy_pk,
+ tlvs: ForwardTlvs {
+ short_channel_id: 0,
+ payment_relay: PaymentRelay {
+ cltv_expiry_delta: 144,
+ fee_proportional_millionths: 500,
+ fee_base_msat: 100,
+ },
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 1_000,
+ },
+ features: BlindedHopFeatures::empty(),
+ },
+ htlc_maximum_msat: u64::max_value(),
+ }];
+ let recv_tlvs = ReceiveTlvs {
+ payment_secret: PaymentSecret([0; 32]),
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 1,
+ },
+ };
+ let htlc_maximum_msat = 100_000;
+ let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+ // Two hops each charging 100 msat base + 500 ppm compound to the values below.
+ assert_eq!(blinded_payinfo.fee_base_msat, 201);
+ assert_eq!(blinded_payinfo.fee_proportional_millionths, 1001);
+ assert_eq!(blinded_payinfo.cltv_expiry_delta, 288);
+ assert_eq!(blinded_payinfo.htlc_minimum_msat, 900);
+ assert_eq!(blinded_payinfo.htlc_maximum_msat, htlc_maximum_msat);
+ }
+
+ #[test]
+ fn compute_payinfo_1_hop() {
+ // With no intermediate hops, aggregation should produce zero fees/CLTV and pass the
+ // payee's own limits straight through.
+ let recv_tlvs = ReceiveTlvs {
+ payment_secret: PaymentSecret([0; 32]),
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 1,
+ },
+ };
+ let blinded_payinfo = super::compute_payinfo(&[], &recv_tlvs, 4242).unwrap();
+ assert_eq!(blinded_payinfo.fee_base_msat, 0);
+ assert_eq!(blinded_payinfo.fee_proportional_millionths, 0);
+ assert_eq!(blinded_payinfo.cltv_expiry_delta, 0);
+ assert_eq!(blinded_payinfo.htlc_minimum_msat, 1);
+ assert_eq!(blinded_payinfo.htlc_maximum_msat, 4242);
+ }
+
+ #[test]
+ fn simple_aggregated_htlc_min() {
+ // If no hops charge fees, the htlc_minimum_msat should just be the maximum htlc_minimum_msat
+ // along the path.
+ let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
+ let intermediate_nodes = vec![ForwardNode {
+ node_id: dummy_pk,
+ tlvs: ForwardTlvs {
+ short_channel_id: 0,
+ payment_relay: PaymentRelay {
+ cltv_expiry_delta: 0,
+ fee_proportional_millionths: 0,
+ fee_base_msat: 0,
+ },
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 1,
+ },
+ features: BlindedHopFeatures::empty(),
+ },
+ htlc_maximum_msat: u64::max_value()
+ }, ForwardNode {
+ node_id: dummy_pk,
+ tlvs: ForwardTlvs {
+ short_channel_id: 0,
+ payment_relay: PaymentRelay {
+ cltv_expiry_delta: 0,
+ fee_proportional_millionths: 0,
+ fee_base_msat: 0,
+ },
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 2_000,
+ },
+ features: BlindedHopFeatures::empty(),
+ },
+ htlc_maximum_msat: u64::max_value()
+ }];
+ let recv_tlvs = ReceiveTlvs {
+ payment_secret: PaymentSecret([0; 32]),
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 3,
+ },
+ };
+ let htlc_maximum_msat = 100_000;
+ let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+ assert_eq!(blinded_payinfo.htlc_minimum_msat, 2_000);
+ }
+
+ #[test]
+ fn aggregated_htlc_min() {
+ // Create a path with varying fees and htlc_mins, and make sure htlc_minimum_msat ends up as the
+ // max (htlc_min - following_fees) along the path.
+ let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
+ let intermediate_nodes = vec![ForwardNode {
+ node_id: dummy_pk,
+ tlvs: ForwardTlvs {
+ short_channel_id: 0,
+ payment_relay: PaymentRelay {
+ cltv_expiry_delta: 0,
+ fee_proportional_millionths: 500,
+ fee_base_msat: 1_000,
+ },
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 5_000,
+ },
+ features: BlindedHopFeatures::empty(),
+ },
+ htlc_maximum_msat: u64::max_value()
+ }, ForwardNode {
+ node_id: dummy_pk,
+ tlvs: ForwardTlvs {
+ short_channel_id: 0,
+ payment_relay: PaymentRelay {
+ cltv_expiry_delta: 0,
+ fee_proportional_millionths: 500,
+ fee_base_msat: 200,
+ },
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 2_000,
+ },
+ features: BlindedHopFeatures::empty(),
+ },
+ htlc_maximum_msat: u64::max_value()
+ }];
+ let recv_tlvs = ReceiveTlvs {
+ payment_secret: PaymentSecret([0; 32]),
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 1,
+ },
+ };
+ // NOTE(review): 3798 msat is the expected aggregated minimum given the hops'
+ // 5_000/2_000 msat mins and their fees — re-derive if the constants above change.
+ let htlc_minimum_msat = 3798;
+ assert!(super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_minimum_msat - 1).is_err());
+
+ let htlc_maximum_msat = htlc_minimum_msat + 1;
+ let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat).unwrap();
+ assert_eq!(blinded_payinfo.htlc_minimum_msat, htlc_minimum_msat);
+ assert_eq!(blinded_payinfo.htlc_maximum_msat, htlc_maximum_msat);
+ }
+
+ #[test]
+ fn aggregated_htlc_max() {
+ // Create a path with varying fees and `htlc_maximum_msat`s, and make sure the aggregated max
+ // htlc ends up as the min (htlc_max - following_fees) along the path.
+ let dummy_pk = PublicKey::from_slice(&[2; 33]).unwrap();
+ let intermediate_nodes = vec![ForwardNode {
+ node_id: dummy_pk,
+ tlvs: ForwardTlvs {
+ short_channel_id: 0,
+ payment_relay: PaymentRelay {
+ cltv_expiry_delta: 0,
+ fee_proportional_millionths: 500,
+ fee_base_msat: 1_000,
+ },
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 1,
+ },
+ features: BlindedHopFeatures::empty(),
+ },
+ htlc_maximum_msat: 5_000,
+ }, ForwardNode {
+ node_id: dummy_pk,
+ tlvs: ForwardTlvs {
+ short_channel_id: 0,
+ payment_relay: PaymentRelay {
+ cltv_expiry_delta: 0,
+ fee_proportional_millionths: 500,
+ fee_base_msat: 1,
+ },
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 1,
+ },
+ features: BlindedHopFeatures::empty(),
+ },
+ htlc_maximum_msat: 10_000
+ }];
+ let recv_tlvs = ReceiveTlvs {
+ payment_secret: PaymentSecret([0; 32]),
+ payment_constraints: PaymentConstraints {
+ max_cltv_expiry: 0,
+ htlc_minimum_msat: 1,
+ },
+ };
+
+ let blinded_payinfo = super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, 10_000).unwrap();
+ // NOTE(review): 3997 msat reflects the first hop's 5_000 msat cap minus that hop's
+ // fees — re-derive if the constants above change.
+ assert_eq!(blinded_payinfo.htlc_maximum_msat, 3997);
+ }
+}
/// serialized prior to LDK version 0.0.117.
sender_intended_total_msat: Option<u64>,
},
+ /// Indicates a request for an invoice failed to yield a response in a reasonable amount of time
+ /// or was explicitly abandoned by [`ChannelManager::abandon_payment`].
+ ///
+ /// [`ChannelManager::abandon_payment`]: crate::ln::channelmanager::ChannelManager::abandon_payment
+ InvoiceRequestFailed {
+ /// The `payment_id` to have been associated with payment for the requested invoice.
+ payment_id: PaymentId,
+ },
/// Indicates an outbound payment we made succeeded (i.e. it made it all the way to its target
/// and we got back the payment preimage for it).
///
(8, funding_txo, required),
});
},
+ &Event::InvoiceRequestFailed { ref payment_id } => {
+ 33u8.write(writer)?;
+ write_tlv_fields!(writer, {
+ (0, payment_id, required),
+ })
+ },
// Note that, going forward, all new events must only write data inside of
// `write_tlv_fields`. Versions 0.0.101+ will ignore odd-numbered events that write
// data via `write_tlv_fields`.
};
f()
},
+ 33u8 => {
+ let f = || {
+ _init_and_read_len_prefixed_tlv_fields!(reader, {
+ (0, payment_id, required),
+ });
+ Ok(Some(Event::InvoiceRequestFailed {
+ payment_id: payment_id.0.unwrap(),
+ }))
+ };
+ f()
+ },
// Versions prior to 0.0.100 did not ignore odd types, instead returning InvalidValue.
// Version 0.0.100 failed to properly ignore odd types, possibly resulting in corrupt
// reads.
use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
-use crate::ln::channel::AnnouncementSigsState;
+use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
use crate::ln::msgs;
use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
use crate::util::test_channel_signer::TestChannelSigner;
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2);
- if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- // Check that even though the persister is returning a InProgress,
- // because the update is bogus, ultimately the error that's returned
- // should be a PermanentFailure.
- if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
- logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
- assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
- } else { assert!(false); }
+ if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2) {
+ if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ // Check that even though the persister is returning an InProgress,
+ // because the update is bogus, ultimately the error that's returned
+ // should be a PermanentFailure.
+ if let ChannelMonitorUpdateStatus::PermanentFailure = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor error to be permanent"); }
+ logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Persistence of ChannelMonitorUpdate for channel [0-9a-f]* in progress").unwrap(), 1);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+ } else { assert!(false); }
+ } else {
+ assert!(false);
+ }
}
check_added_monitors!(nodes[0], 1);
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
{
let mut node_1_per_peer_lock;
let mut node_1_peer_state_lock;
- get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+ get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived;
}
// Route the payment and deliver the initial commitment_signed (with a monitor update failure
(0, update, required),
});
+/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
+/// its variants containing an appropriate channel struct.
+pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
+ /// An outbound (locally-initiated) V1 channel that is not yet funded.
+ UnfundedOutboundV1(OutboundV1Channel<SP>),
+ /// An inbound (counterparty-initiated) V1 channel that is not yet funded.
+ UnfundedInboundV1(InboundV1Channel<SP>),
+ /// A channel that has been funded.
+ Funded(Channel<SP>),
+}
+
+impl<'a, SP: Deref> ChannelPhase<SP> where
+ SP::Target: SignerProvider,
+ <SP::Target as SignerProvider>::Signer: ChannelSigner,
+{
+ /// Returns the `ChannelContext` common to all phases, immutably.
+ pub fn context(&'a self) -> &'a ChannelContext<SP> {
+ match self {
+ ChannelPhase::Funded(chan) => &chan.context,
+ ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
+ ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
+ }
+ }
+
+ /// Returns the `ChannelContext` common to all phases, mutably.
+ pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
+ match self {
+ ChannelPhase::Funded(ref mut chan) => &mut chan.context,
+ ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
+ ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
+ }
+ }
+}
+
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
/// A counter tracking how many ticks have elapsed since this unfunded channel was
(commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
}
-// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
-// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
-// calling channel_id() before we're set up or things like get_funding_signed on an
-// inbound channel.
-//
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by another channel participant entity.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channel::{Channel, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
+use crate::ln::channel::{Channel, ChannelPhase, ChannelContext, ChannelError, ChannelUpdateStatus, ShutdownResult, UnfundedChannelContext, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::Bolt11InvoiceFeatures;
/// State we hold per-peer.
pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
- /// `channel_id` -> `Channel`.
- ///
- /// Holds all funded channels where the peer is the counterparty.
- pub(super) channel_by_id: HashMap<ChannelId, Channel<SP>>,
- /// `temporary_channel_id` -> `OutboundV1Channel`.
- ///
- /// Holds all outbound V1 channels where the peer is the counterparty. Once an outbound channel has
- /// been assigned a `channel_id`, the entry in this map is removed and one is created in
- /// `channel_by_id`.
- pub(super) outbound_v1_channel_by_id: HashMap<ChannelId, OutboundV1Channel<SP>>,
- /// `temporary_channel_id` -> `InboundV1Channel`.
- ///
- /// Holds all inbound V1 channels where the peer is the counterparty. Once an inbound channel has
- /// been assigned a `channel_id`, the entry in this map is removed and one is created in
- /// `channel_by_id`.
- pub(super) inbound_v1_channel_by_id: HashMap<ChannelId, InboundV1Channel<SP>>,
+ /// `channel_id` -> `ChannelPhase`
+ ///
+ /// Holds all channels within corresponding `ChannelPhase`s where the peer is the counterparty.
+ pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
/// `temporary_channel_id` -> `InboundChannelRequest`.
///
/// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
if require_disconnected && self.is_connected {
return false
}
- self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
+ self.channel_by_id.iter().filter(|(_, phase)| matches!(phase, ChannelPhase::Funded(_))).count() == 0
+ && self.monitor_update_blocked_actions.is_empty()
&& self.in_flight_monitor_updates.is_empty()
}
// Returns a count of all channels we have with this peer, including unfunded channels.
fn total_channel_count(&self) -> usize {
- self.channel_by_id.len() +
- self.outbound_v1_channel_by_id.len() +
- self.inbound_v1_channel_by_id.len() +
- self.inbound_channel_request_by_id.len()
+ // All `ChannelPhase`s (funded and unfunded) now live in `channel_by_id`; only
+ // not-yet-accepted inbound requests are tracked in a separate map.
+ self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
}
// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
fn has_channel(&self, channel_id: &ChannelId) -> bool {
- self.channel_by_id.contains_key(&channel_id) ||
- self.outbound_v1_channel_by_id.contains_key(&channel_id) ||
- self.inbound_v1_channel_by_id.contains_key(&channel_id) ||
- self.inbound_channel_request_by_id.contains_key(&channel_id)
+ // `channel_id` is already a reference, so it is passed through without the extra `&`.
+ self.channel_by_id.contains_key(channel_id) ||
+ self.inbound_channel_request_by_id.contains_key(channel_id)
}
}
/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
-/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time-out the
-/// idempotency of payments by [`PaymentId`]. See
-/// [`OutboundPayments::remove_stale_resolved_payments`].
-pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
-
/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected
/// until we mark the channel disabled and gossip the update.
pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
#[derive(Debug, PartialEq)]
pub enum RecentPaymentDetails {
+ /// When an invoice was requested and thus a payment has not yet been sent.
+ AwaitingInvoice {
+ /// Identifier for the payment to ensure idempotency.
+ payment_id: PaymentId,
+ },
/// When a payment is still being sent and awaiting successful delivery.
Pending {
/// Hash of the payment that is currently being sent but has yet to be fulfilled or
}
/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
-macro_rules! convert_chan_err {
- ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
+macro_rules! convert_chan_phase_err {
+ ($self: ident, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
match $err {
ChannelError::Warn(msg) => {
- (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
+ (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id))
},
ChannelError::Ignore(msg) => {
- (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+ (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
},
ChannelError::Close(msg) => {
- log_error!($self.logger, "Closing channel {} due to close-required error: {}", &$channel_id, msg);
- update_maps_on_chan_removal!($self, &$channel.context);
+ log_error!($self.logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
+ update_maps_on_chan_removal!($self, $channel.context);
let shutdown_res = $channel.context.force_shutdown(true);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
- shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok(), $channel.context.get_value_satoshis()))
+ let user_id = $channel.context.get_user_id();
+ let channel_capacity_satoshis = $channel.context.get_value_satoshis();
+
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id,
+ shutdown_res, $channel_update, channel_capacity_satoshis))
},
}
};
- ($self: ident, $err: expr, $channel_context: expr, $channel_id: expr, UNFUNDED) => {
- match $err {
- // We should only ever have `ChannelError::Close` when unfunded channels error.
- // In any case, just close the channel.
- ChannelError::Warn(msg) | ChannelError::Ignore(msg) | ChannelError::Close(msg) => {
- log_error!($self.logger, "Closing unfunded channel {} due to an error: {}", &$channel_id, msg);
- update_maps_on_chan_removal!($self, &$channel_context);
- let shutdown_res = $channel_context.force_shutdown(false);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
- shutdown_res, None, $channel_context.get_value_satoshis()))
+ ($self: ident, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => {
+ convert_chan_phase_err!($self, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() })
+ };
+ ($self: ident, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => {
+ convert_chan_phase_err!($self, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None)
+ };
+ ($self: ident, $err: expr, $channel_phase: expr, $channel_id: expr) => {
+ match $channel_phase {
+ ChannelPhase::Funded(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, FUNDED_CHANNEL)
+ },
+ ChannelPhase::UnfundedOutboundV1(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
+ },
+ ChannelPhase::UnfundedInboundV1(channel) => {
+ convert_chan_phase_err!($self, $err, channel, $channel_id, UNFUNDED_CHANNEL)
},
}
- }
+ };
}
-macro_rules! break_chan_entry {
+macro_rules! break_chan_phase_entry {
($self: ident, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
+ let key = *$entry.key();
+ let (drop, res) = convert_chan_phase_err!($self, e, $entry.get_mut(), &key);
if drop {
$entry.remove_entry();
}
}
}
-macro_rules! try_v1_outbound_chan_entry {
- ($self: ident, $res: expr, $entry: expr) => {
- match $res {
- Ok(res) => res,
- Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $entry.get_mut().context, $entry.key(), UNFUNDED);
- if drop {
- $entry.remove_entry();
- }
- return Err(res);
- }
- }
- }
-}
-
-macro_rules! try_chan_entry {
+macro_rules! try_chan_phase_entry {
($self: ident, $res: expr, $entry: expr) => {
match $res {
Ok(res) => res,
Err(e) => {
- let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
+ let key = *$entry.key();
+ let (drop, res) = convert_chan_phase_err!($self, e, $entry.get_mut(), &key);
if drop {
$entry.remove_entry();
}
}
}
-macro_rules! remove_channel {
+macro_rules! remove_channel_phase {
($self: expr, $entry: expr) => {
{
let channel = $entry.remove_entry().1;
- update_maps_on_chan_removal!($self, &channel.context);
+ update_maps_on_chan_removal!($self, &channel.context());
channel
}
}
handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
};
($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr, INITIAL_MONITOR) => {
- handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING_INITIAL_MONITOR, $chan_entry.remove_entry())
+ if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
+ handle_new_monitor_update!($self, $update_res, $peer_state_lock, $peer_state,
+ $per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
+ } else {
+ // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
+ // update).
+ debug_assert!(false);
+ let channel_id = *$chan_entry.key();
+ let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
+ "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
+ $chan_entry.get_mut(), &channel_id);
+ $chan_entry.remove();
+ Err(err)
+ }
};
($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
})
} };
($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
- handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
+ if let ChannelPhase::Funded(chan) = $chan_entry.get_mut() {
+ handle_new_monitor_update!($self, $funding_txo, $update, $peer_state_lock, $peer_state,
+ $per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
+ } else {
+ // We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
+ // update).
+ debug_assert!(false);
+ let channel_id = *$chan_entry.key();
+ let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
+ "Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
+ $chan_entry.get_mut(), &channel_id);
+ $chan_entry.remove();
+ Err(err)
+ }
}
}
let res = channel.get_open_channel(self.genesis_hash.clone());
let temporary_channel_id = channel.context.channel_id();
- match peer_state.outbound_v1_channel_by_id.entry(temporary_channel_id) {
+ match peer_state.channel_by_id.entry(temporary_channel_id) {
hash_map::Entry::Occupied(_) => {
if cfg!(fuzzing) {
return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
panic!("RNG is bad???");
}
},
- hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+ hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
}
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- // Only `Channels` in the channel_by_id map can be considered funded.
- for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
- let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone(), &self.fee_estimator);
- res.push(details);
- }
+ res.extend(peer_state.channel_by_id.iter()
+ .filter_map(|(chan_id, phase)| match phase {
+ // Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded.
+ ChannelPhase::Funded(chan) => Some((chan_id, chan)),
+ _ => None,
+ })
+ .filter(f)
+ .map(|(_channel_id, channel)| {
+ ChannelDetails::from_channel_context(&channel.context, best_block_height,
+ peer_state.latest_features.clone(), &self.fee_estimator)
+ })
+ );
}
}
res
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for (_channel_id, channel) in peer_state.channel_by_id.iter() {
- let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone(), &self.fee_estimator);
- res.push(details);
- }
- for (_channel_id, channel) in peer_state.inbound_v1_channel_by_id.iter() {
- let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
- peer_state.latest_features.clone(), &self.fee_estimator);
- res.push(details);
- }
- for (_channel_id, channel) in peer_state.outbound_v1_channel_by_id.iter() {
- let details = ChannelDetails::from_channel_context(&channel.context, best_block_height,
+ for context in peer_state.channel_by_id.iter().map(|(_, phase)| phase.context()) {
+ let details = ChannelDetails::from_channel_context(context, best_block_height,
peer_state.latest_features.clone(), &self.fee_estimator);
res.push(details);
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let features = &peer_state.latest_features;
- let chan_context_to_details = |context| {
+ let context_to_details = |context| {
ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
};
return peer_state.channel_by_id
.iter()
- .map(|(_, channel)| &channel.context)
- .chain(peer_state.outbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
- .chain(peer_state.inbound_v1_channel_by_id.iter().map(|(_, channel)| &channel.context))
- .map(chan_context_to_details)
+ .map(|(_, phase)| phase.context())
+ .map(context_to_details)
.collect();
}
vec![]
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
- .filter_map(|(_, pending_outbound_payment)| match pending_outbound_payment {
+ .filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
+ PendingOutboundPayment::AwaitingInvoice { .. } => {
+ Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
+ },
+ // InvoiceReceived is an intermediate state and doesn't need to be exposed
+ PendingOutboundPayment::InvoiceReceived { .. } => {
+ Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
+ },
PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
Some(RecentPaymentDetails::Pending {
payment_hash: *payment_hash,
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
- let funding_txo_opt = chan_entry.get().context.get_funding_txo();
- let their_features = &peer_state.latest_features;
- let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
- .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
- failed_htlcs = htlcs;
-
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg: shutdown_msg,
- });
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let funding_txo_opt = chan.context.get_funding_txo();
+ let their_features = &peer_state.latest_features;
+ let (shutdown_msg, mut monitor_update_opt, htlcs) =
+ chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
+ failed_htlcs = htlcs;
+
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg: shutdown_msg,
+ });
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt.take() {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
- }
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt.take() {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+ }
- if chan_entry.get().is_shutdown() {
- let channel = remove_channel!(self, chan_entry);
- if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: channel_update
- });
+ if chan.is_shutdown() {
+ if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) {
+ if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: channel_update
+ });
+ }
+ self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
+ }
}
- self.issue_channel_close_events(&channel.context, ClosureReason::HolderForceClosed);
+ break Ok(());
}
- break Ok(());
},
hash_map::Entry::Vacant(_) => (),
}
//
// An appropriate error will be returned for non-existence of the channel if that's the case.
return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ())
- // TODO(dunxen): This is still not ideal as we're doing some extra lookups.
- // Fix this with https://github.com/lightningdevkit/rust-lightning/issues/2422
};
for htlc_source in failed_htlcs.drain(..) {
} else {
ClosureReason::HolderForceClosed
};
- if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
- log_error!(self.logger, "Force-closing channel {}", &channel_id);
- self.issue_channel_close_events(&chan.get().context, closure_reason);
- let mut chan = remove_channel!(self, chan);
- self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
- (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
- } else if let hash_map::Entry::Occupied(chan) = peer_state.outbound_v1_channel_by_id.entry(channel_id.clone()) {
- log_error!(self.logger, "Force-closing channel {}", &channel_id);
- self.issue_channel_close_events(&chan.get().context, closure_reason);
- let mut chan = remove_channel!(self, chan);
- self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Unfunded channel has no update
- (None, chan.context.get_counterparty_node_id())
- } else if let hash_map::Entry::Occupied(chan) = peer_state.inbound_v1_channel_by_id.entry(channel_id.clone()) {
- log_error!(self.logger, "Force-closing channel {}", &channel_id);
- self.issue_channel_close_events(&chan.get().context, closure_reason);
- let mut chan = remove_channel!(self, chan);
- self.finish_force_close_channel(chan.context.force_shutdown(false));
- // Unfunded channel has no update
- (None, chan.context.get_counterparty_node_id())
+ if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
+ log_error!(self.logger, "Force-closing channel {}", channel_id);
+ self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason);
+ let mut chan_phase = remove_channel_phase!(self, chan_phase_entry);
+ match chan_phase {
+ ChannelPhase::Funded(mut chan) => {
+ self.finish_force_close_channel(chan.context.force_shutdown(broadcast));
+ (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id())
+ },
+ ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => {
+ self.finish_force_close_channel(chan_phase.context_mut().force_shutdown(false));
+ // Unfunded channel has no update
+ (None, chan_phase.context().get_counterparty_node_id())
+ },
+ }
} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
log_error!(self.logger, "Force-closing channel {}", &channel_id);
// N.B. that we don't send any channel close event here: we
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
+ let chan = match peer_state.channel_by_id.get_mut(&forwarding_id).map(
+ |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
+ ).flatten() {
None => {
// Channel was removed. The short_to_chan_info and channel_by_id maps
// have no consistency guarantees.
.ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
- if !chan.get().context.is_live() {
- return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
- }
- let funding_txo = chan.get().context.get_funding_txo().unwrap();
- let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
- htlc_cltv, HTLCSource::OutboundRoute {
- path: path.clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- payment_id,
- }, onion_packet, None, &self.fee_estimator, &self.logger);
- match break_chan_entry!(self, send_res, chan) {
- Some(monitor_update) => {
- match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
- Err(e) => break Err(e),
- Ok(false) => {
- // Note that MonitorUpdateInProgress here indicates (per function
- // docs) that we will resend the commitment update once monitor
- // updating completes. Therefore, we must return an error
- // indicating that it is unsafe to retry the payment wholesale,
- // which we do in the send_payment check for
- // MonitorUpdateInProgress, below.
- return Err(APIError::MonitorUpdateInProgress);
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
+ match chan_phase_entry.get_mut() {
+ ChannelPhase::Funded(chan) => {
+ if !chan.context.is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
+ }
+ let funding_txo = chan.context.get_funding_txo().unwrap();
+ let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
+ htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ payment_id,
+ }, onion_packet, None, &self.fee_estimator, &self.logger);
+ match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
+ Some(monitor_update) => {
+ match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
+ Err(e) => break Err(e),
+ Ok(false) => {
+ // Note that MonitorUpdateInProgress here indicates (per function
+ // docs) that we will resend the commitment update once monitor
+ // updating completes. Therefore, we must return an error
+ // indicating that it is unsafe to retry the payment wholesale,
+ // which we do in the send_payment check for
+ // MonitorUpdateInProgress, below.
+ return Err(APIError::MonitorUpdateInProgress);
+ },
+ Ok(true) => {},
+ }
},
- Ok(true) => {},
+ None => {},
}
},
- None => { },
- }
+ _ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
+ };
} else {
// The channel was likely removed after we fetched the id from the
// `short_to_chan_info` map, but before we successfully locked the
}
- /// Signals that no further retries for the given payment should occur. Useful if you have a
+ /// Signals that no further attempts for the given payment should occur. Useful if you have a
/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
/// retries are exhausted.
///
+ /// # Event Generation
+ ///
/// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
/// as there are no remaining pending HTLCs for this payment.
///
/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
/// determine the ultimate status of a payment.
///
- /// If an [`Event::PaymentFailed`] event is generated and we restart without this
- /// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
+ /// # Requested Invoices
///
- /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
- /// [`Event::PaymentSent`]: events::Event::PaymentSent
+ /// In the case of paying a [`Bolt12Invoice`], abandoning the payment prior to receiving the
+ /// invoice will result in an [`Event::InvoiceRequestFailed`] and prevent any attempts at paying
+ /// it once received. The other events may only be generated once the invoice has been received.
+ ///
+ /// # Restart Behavior
+ ///
+ /// If an [`Event::PaymentFailed`] is generated and we restart without first persisting the
+ /// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated; likewise for
+ /// [`Event::InvoiceRequestFailed`].
+ ///
+ /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
pub fn abandon_payment(&self, payment_id: PaymentId) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- let (chan, msg) = match peer_state.outbound_v1_channel_by_id.remove(&temporary_channel_id) {
- Some(chan) => {
+ let (chan, msg) = match peer_state.channel_by_id.remove(temporary_channel_id) {
+ Some(ChannelPhase::UnfundedOutboundV1(chan)) => {
let funding_txo = find_funding_output(&chan, &funding_transaction)?;
let funding_res = chan.get_funding_created(funding_transaction, funding_txo, &self.logger)
},
}
},
- None => {
- return Err(APIError::ChannelUnavailable {
+ Some(phase) => {
+ peer_state.channel_by_id.insert(*temporary_channel_id, phase);
+ return Err(APIError::APIMisuseError {
err: format!(
- "Channel with id {} not found for the passed counterparty node_id {}",
+ "Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
temporary_channel_id, counterparty_node_id),
})
},
+ None => return Err(APIError::ChannelUnavailable {err: format!(
+ "Channel with id {} not found for the passed counterparty node_id {}",
+ temporary_channel_id, counterparty_node_id),
+ }),
};
peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() {
panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
}
- e.insert(chan);
+ e.insert(ChannelPhase::Funded(chan));
}
}
Ok(())
};
}
for channel_id in channel_ids {
- if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
- let mut config = channel.context.config();
+ if let Some(channel_phase) = peer_state.channel_by_id.get_mut(channel_id) {
+ let mut config = channel_phase.context().config();
config.apply(config_update);
- if !channel.context.update_config(&config) {
+ if !channel_phase.context_mut().update_config(&config) {
continue;
}
- if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
- } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.context.get_counterparty_node_id(),
- msg,
- });
+ if let ChannelPhase::Funded(channel) = channel_phase {
+ if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg,
+ });
+ }
}
continue;
- }
-
- let context = if let Some(channel) = peer_state.inbound_v1_channel_by_id.get_mut(channel_id) {
- &mut channel.context
- } else if let Some(channel) = peer_state.outbound_v1_channel_by_id.get_mut(channel_id) {
- &mut channel.context
} else {
// This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
debug_assert!(false);
channel_id, counterparty_node_id),
});
};
- let mut config = context.config();
- config.apply(config_update);
- // We update the config, but we MUST NOT broadcast a `channel_update` before `channel_ready`
- // which would be the case for pending inbound/outbound channels.
- context.update_config(&config);
}
Ok(())
}
.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.get(&next_hop_channel_id) {
- Some(chan) => {
+ match peer_state.channel_by_id.get(next_hop_channel_id) {
+ Some(ChannelPhase::Funded(chan)) => {
if !chan.context.is_usable() {
return Err(APIError::ChannelUnavailable {
err: format!("Channel with id {} not fully established", next_hop_channel_id)
}
chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
},
+ Some(_) => return Err(APIError::ChannelUnavailable {
+ err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
+ next_hop_channel_id, next_node_id)
+ }),
None => return Err(APIError::ChannelUnavailable {
- err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
+ err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
next_hop_channel_id, next_node_id)
})
}
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(forward_chan_id) {
- hash_map::Entry::Vacant(_) => {
- forwarding_channel_not_found!();
- continue;
- },
- hash_map::Entry::Occupied(mut chan) => {
- for forward_info in pending_forwards.drain(..) {
- match forward_info {
- HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
- forward_info: PendingHTLCInfo {
- incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
- routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
- },
- }) => {
- log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- user_channel_id: Some(prev_user_channel_id),
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- // Phantom payments are only PendingHTLCRouting::Receive.
- phantom_shared_secret: None,
- });
- if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
- payment_hash, outgoing_cltv_value, htlc_source.clone(),
- onion_packet, skimmed_fee_msat, &self.fee_estimator,
- &self.logger)
- {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
- } else {
- panic!("Stated return value requirements in send_htlc() were not met");
- }
- let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::reason(failure_code, data),
- HTLCDestination::NextHopChannel { node_id: Some(chan.get().context.get_counterparty_node_id()), channel_id: forward_chan_id }
- ));
- continue;
- }
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ for forward_info in pending_forwards.drain(..) {
+ match forward_info {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
+ forward_info: PendingHTLCInfo {
+ incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+ routing: PendingHTLCRouting::Forward { onion_packet, .. }, skimmed_fee_msat, ..
},
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
- },
- HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
- log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- if let Err(e) = chan.get_mut().queue_fail_htlc(
- htlc_id, err_packet, &self.logger
- ) {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_htlc() were not met");
- }
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
+ }) => {
+ log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ // Phantom payments are only PendingHTLCRouting::Receive.
+ phantom_shared_secret: None,
+ });
+ if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
+ payment_hash, outgoing_cltv_value, htlc_source.clone(),
+ onion_packet, skimmed_fee_msat, &self.fee_estimator,
+ &self.logger)
+ {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
}
- },
- }
+ let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::reason(failure_code, data),
+ HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
+ ));
+ continue;
+ }
+ },
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
+ if let Err(e) = chan.queue_fail_htlc(
+ htlc_id, err_packet, &self.logger
+ ) {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in queue_fail_htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK: the channel is on
+ // the chain and sending the HTLC-Timeout is their problem.
+ continue;
+ }
+ },
}
}
+ } else {
+ forwarding_channel_not_found!();
+ continue;
}
} else {
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
- hash_map::Entry::Occupied(mut chan) => {
+ hash_map::Entry::Occupied(mut chan_phase) => {
updated_chan = true;
handle_new_monitor_update!(self, funding_txo, update.clone(),
- peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
+ peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
},
hash_map::Entry::Vacant(_) => Ok(()),
}
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(chan) = peer_state.channel_by_id.get_mut(&channel_id) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
} else {
let update_actions = peer_state.monitor_update_blocked_actions
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
+ for (chan_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
+ |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
+ ) {
let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
min_mempool_feerate
} else {
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
let mut pending_peers_awaiting_removal = Vec::new();
+
+ let process_unfunded_channel_tick = |
+ chan_id: &ChannelId,
+ context: &mut ChannelContext<SP>,
+ unfunded_context: &mut UnfundedChannelContext,
+ pending_msg_events: &mut Vec<MessageSendEvent>,
+ counterparty_node_id: PublicKey,
+ | {
+ context.maybe_expire_prev_config();
+ if unfunded_context.should_expire_unfunded_channel() {
+ log_error!(self.logger,
+ "Force-closing pending channel with ID {} for not establishing in a timely manner", chan_id);
+ update_maps_on_chan_removal!(self, &context);
+ self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed);
+ self.finish_force_close_channel(context.force_shutdown(false));
+ pending_msg_events.push(MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id: *chan_id,
+ data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
+ },
+ },
+ });
+ false
+ } else {
+ true
+ }
+ };
+
{
let per_peer_state = self.per_peer_state.read().unwrap();
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
let counterparty_node_id = *counterparty_node_id;
- peer_state.channel_by_id.retain(|chan_id, chan| {
- let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
- min_mempool_feerate
- } else {
- normal_feerate
- };
- let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
- if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
-
- if let Err(e) = chan.timer_check_closing_negotiation_progress() {
- let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
- handle_errors.push((Err(err), counterparty_node_id));
- if needs_close { return false; }
- }
-
- match chan.channel_update_status() {
- ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
- ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
- ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
- => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
- ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
- => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
- ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
- n += 1;
- if n >= DISABLE_GOSSIP_TICKS {
- chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- should_persist = NotifyOption::DoPersist;
+ peer_state.channel_by_id.retain(|chan_id, phase| {
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
+ min_mempool_feerate
} else {
- chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
- }
- },
- ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
- n += 1;
- if n >= ENABLE_GOSSIP_TICKS {
- chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- should_persist = NotifyOption::DoPersist;
- } else {
- chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
+ normal_feerate
+ };
+ let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
+ if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
+
+ if let Err(e) = chan.timer_check_closing_negotiation_progress() {
+ let (needs_close, err) = convert_chan_phase_err!(self, e, chan, chan_id, FUNDED_CHANNEL);
+ handle_errors.push((Err(err), counterparty_node_id));
+ if needs_close { return false; }
}
- },
- _ => {},
- }
- chan.context.maybe_expire_prev_config();
-
- if chan.should_disconnect_peer_awaiting_response() {
- log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
- counterparty_node_id, chan_id);
- pending_msg_events.push(MessageSendEvent::HandleError {
- node_id: counterparty_node_id,
- action: msgs::ErrorAction::DisconnectPeerWithWarning {
- msg: msgs::WarningMessage {
- channel_id: *chan_id,
- data: "Disconnecting due to timeout awaiting response".to_owned(),
+ match chan.channel_update_status() {
+ ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
+ ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
+ ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
+ => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+ ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
+ => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+ ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
+ n += 1;
+ if n >= DISABLE_GOSSIP_TICKS {
+ chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ } else {
+ chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
+ }
},
- },
- });
- }
+ ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
+ n += 1;
+ if n >= ENABLE_GOSSIP_TICKS {
+ chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ } else {
+ chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
+ }
+ },
+ _ => {},
+ }
- true
- });
+ chan.context.maybe_expire_prev_config();
+
+ if chan.should_disconnect_peer_awaiting_response() {
+ log_debug!(self.logger, "Disconnecting peer {} due to not making any progress on channel {}",
+ counterparty_node_id, chan_id);
+ pending_msg_events.push(MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::DisconnectPeerWithWarning {
+ msg: msgs::WarningMessage {
+ channel_id: *chan_id,
+ data: "Disconnecting due to timeout awaiting response".to_owned(),
+ },
+ },
+ });
+ }
- let process_unfunded_channel_tick = |
- chan_id: &ChannelId,
- chan_context: &mut ChannelContext<SP>,
- unfunded_chan_context: &mut UnfundedChannelContext,
- pending_msg_events: &mut Vec<MessageSendEvent>,
- | {
- chan_context.maybe_expire_prev_config();
- if unfunded_chan_context.should_expire_unfunded_channel() {
- log_error!(self.logger,
- "Force-closing pending channel with ID {} for not establishing in a timely manner",
- &chan_id);
- update_maps_on_chan_removal!(self, &chan_context);
- self.issue_channel_close_events(&chan_context, ClosureReason::HolderForceClosed);
- self.finish_force_close_channel(chan_context.force_shutdown(false));
- pending_msg_events.push(MessageSendEvent::HandleError {
- node_id: counterparty_node_id,
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage {
- channel_id: *chan_id,
- data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
- },
- },
- });
- false
- } else {
- true
+ true
+ },
+ ChannelPhase::UnfundedInboundV1(chan) => {
+ process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+ pending_msg_events, counterparty_node_id)
+ },
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ process_unfunded_channel_tick(chan_id, &mut chan.context, &mut chan.unfunded_context,
+ pending_msg_events, counterparty_node_id)
+ },
}
- };
- peer_state.outbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
- chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events));
- peer_state.inbound_v1_channel_by_id.retain(|chan_id, chan| process_unfunded_channel_tick(
- chan_id, &mut chan.context, &mut chan.unfunded_context, pending_msg_events));
+ });
for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
let _ = handle_error!(self, err, counterparty_node_id);
}
- self.pending_outbound_payments.remove_stale_resolved_payments(&self.pending_events);
+ self.pending_outbound_payments.remove_stale_payments(&self.pending_events);
// Technically we don't need to do this here, but if we have holding cell entries in a
// channel that need freeing, it's better to do that here and block a background task
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(channel_id) {
- hash_map::Entry::Occupied(chan_entry) => {
- self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
+ hash_map::Entry::Occupied(chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get() {
+ self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan)
+ } else {
+ // We shouldn't be trying to fail holding cell HTLCs on an unfunded channel.
+ debug_assert!(false);
+ (0x4000|10, Vec::new())
+ }
},
hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
}
if peer_state_opt.is_some() {
let mut peer_state_lock = peer_state_opt.unwrap();
let peer_state = &mut *peer_state_lock;
- if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
- let counterparty_node_id = chan.get().context.get_counterparty_node_id();
- let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
-
- if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
- if let Some(action) = completion_action(Some(htlc_value_msat)) {
- log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
- &chan_id, action);
- peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
- }
- if !during_init {
- let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
- peer_state, per_peer_state, chan);
- if let Err(e) = res {
- // TODO: This is a *critical* error - we probably updated the outbound edge
- // of the HTLC's monitor with a preimage. We should retry this monitor
- // update over and over again until morale improves.
- log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
- return Err((counterparty_node_id, e));
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let counterparty_node_id = chan.context.get_counterparty_node_id();
+ let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
+
+ if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
+ if let Some(action) = completion_action(Some(htlc_value_msat)) {
+ log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
+ chan_id, action);
+ peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
+ }
+ if !during_init {
+ let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+ peer_state, per_peer_state, chan_phase_entry);
+ if let Err(e) = res {
+ // TODO: This is a *critical* error - we probably updated the outbound edge
+ // of the HTLC's monitor with a preimage. We should retry this monitor
+ // update over and over again until morale improves.
+ log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
+ return Err((counterparty_node_id, e));
+ }
+ } else {
+ // If we're running during init we cannot update a monitor directly -
+ // they probably haven't actually been loaded yet. Instead, push the
+ // monitor update as a background event.
+ self.pending_background_events.lock().unwrap().push(
+ BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+ counterparty_node_id,
+ funding_txo: prev_hop.outpoint,
+ update: monitor_update.clone(),
+ });
}
- } else {
- // If we're running during init we cannot update a monitor directly -
- // they probably haven't actually been loaded yet. Instead, push the
- // monitor update as a background event.
- self.pending_background_events.lock().unwrap().push(
- BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
- counterparty_node_id,
- funding_txo: prev_hop.outpoint,
- update: monitor_update.clone(),
- });
}
}
return Ok(());
peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let channel =
- if let Some(chan) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) {
chan
} else {
let update_actions = peer_state.monitor_update_blocked_actions
msg: channel.accept_inbound_channel(),
});
- peer_state.inbound_v1_channel_by_id.insert(temporary_channel_id.clone(), channel);
+ peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
Ok(())
}
peer: &PeerState<SP>, best_block_height: u32
) -> usize {
let mut num_unfunded_channels = 0;
- for (_, chan) in peer.channel_by_id.iter() {
- // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
- // which have not yet had any confirmations on-chain.
- if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
- chan.context.get_funding_tx_confirmations(best_block_height) == 0
- {
- num_unfunded_channels += 1;
- }
- }
- for (_, chan) in peer.inbound_v1_channel_by_id.iter() {
- if chan.context.minimum_depth().unwrap_or(1) != 0 {
- num_unfunded_channels += 1;
+ for (_, phase) in peer.channel_by_id.iter() {
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ // This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
+ // which have not yet had any confirmations on-chain.
+ if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
+ chan.context.get_funding_tx_confirmations(best_block_height) == 0
+ {
+ num_unfunded_channels += 1;
+ }
+ },
+ ChannelPhase::UnfundedInboundV1(chan) => {
+ if chan.context.minimum_depth().unwrap_or(1) != 0 {
+ num_unfunded_channels += 1;
+ }
+ },
+ ChannelPhase::UnfundedOutboundV1(_) => {
+					// Outbound channels aren't counted towards the inbound unfunded-channel
+					// limit we enforce for DoS protection, so skip them here.
+ continue;
+ }
}
}
num_unfunded_channels + peer.inbound_channel_request_by_id.len()
node_id: counterparty_node_id.clone(),
msg: channel.accept_inbound_channel(),
});
- peer_state.inbound_v1_channel_by_id.insert(channel_id, channel);
+ peer_state.channel_by_id.insert(channel_id, ChannelPhase::UnfundedInboundV1(channel));
Ok(())
}
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- match peer_state.outbound_v1_channel_by_id.entry(msg.temporary_channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- try_v1_outbound_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
- (chan.get().context.get_value_satoshis(), chan.get().context.get_funding_redeemscript().to_v0_p2wsh(), chan.get().context.get_user_id())
+ match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+ hash_map::Entry::Occupied(mut phase) => {
+ match phase.get_mut() {
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ try_chan_phase_entry!(self, chan.accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
+ (chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_v0_p2wsh(), chan.context.get_user_id())
+ },
+ _ => {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+ }
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let (chan, funding_msg, monitor) =
- match peer_state.inbound_v1_channel_by_id.remove(&msg.temporary_channel_id) {
- Some(inbound_chan) => {
+ match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
+ Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &self.logger) {
Ok(res) => res,
Err((mut inbound_chan, err)) => {
},
}
},
+ Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
+ },
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
};
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
- let chan = e.insert(chan);
- let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
- per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
- { peer_state.channel_by_id.remove(&new_channel_id) });
-
- // Note that we reply with the new channel_id in error messages if we gave up on the
- // channel, not the temporary_channel_id. This is compatible with ourselves, but the
- // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
- // any messages referencing a previously-closed channel anyway.
- // We do not propagate the monitor update to the user as it would be for a monitor
- // that we didn't manage to store (and that we don't care about - we don't respond
- // with the funding_signed so the channel can never go on chain).
- if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
- res.0 = None;
+ if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
+ let mut res = handle_new_monitor_update!(self, monitor_res, peer_state_lock, peer_state,
+ per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
+ { peer_state.channel_by_id.remove(&new_channel_id) });
+
+ // Note that we reply with the new channel_id in error messages if we gave up on the
+ // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+ // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+ // any messages referencing a previously-closed channel anyway.
+ // We do not propagate the monitor update to the user as it would be for a monitor
+ // that we didn't manage to store (and that we don't care about - we don't respond
+ // with the funding_signed so the channel can never go on chain).
+ if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
+ res.0 = None;
+ }
+ res.map(|_| ())
+ } else {
+ unreachable!("This must be a funded channel as we just inserted it.");
}
- res.map(|_| ())
}
}
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- let monitor = try_chan_entry!(self,
- chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
- let update_res = self.chain_monitor.watch_channel(chan.get().context.get_funding_txo().unwrap(), monitor);
- let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
- if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
- // We weren't able to watch the channel to begin with, so no updates should be made on
- // it. Previously, full_stack_target found an (unreachable) panic when the
- // monitor update contained within `shutdown_finish` was applied.
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
- shutdown_finish.0.take();
- }
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ match chan_phase_entry.get_mut() {
+ ChannelPhase::Funded(ref mut chan) => {
+ let monitor = try_chan_phase_entry!(self,
+ chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
+ let update_res = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor);
+ let mut res = handle_new_monitor_update!(self, update_res, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
+ }
+ }
+ res.map(|_| ())
+ },
+ _ => {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
+ },
}
- res.map(|_| ())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
- self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
- if let Some(announcement_sigs) = announcement_sigs_opt {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", &chan.get().context.channel_id());
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: counterparty_node_id.clone(),
- msg: announcement_sigs,
- });
- } else if chan.get().context.is_usable() {
- // If we're sending an announcement_signatures, we'll send the (public)
- // channel_update after sending a channel_announcement when we receive our
- // counterparty's announcement_signatures. Thus, we only bother to send a
- // channel_update here if the channel is not public, i.e. we're not sending an
- // announcement_signatures.
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", &chan.get().context.channel_id());
- if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let announcement_sigs_opt = try_chan_phase_entry!(self, chan.channel_ready(&msg, &self.node_signer,
+ self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan_phase_entry);
+ if let Some(announcement_sigs) = announcement_sigs_opt {
+ log_trace!(self.logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
node_id: counterparty_node_id.clone(),
- msg,
+ msg: announcement_sigs,
});
+ } else if chan.context.is_usable() {
+ // If we're sending an announcement_signatures, we'll send the (public)
+ // channel_update after sending a channel_announcement when we receive our
+ // counterparty's announcement_signatures. Thus, we only bother to send a
+ // channel_update here if the channel is not public, i.e. we're not sending an
+ // announcement_signatures.
+ log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
+ if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: counterparty_node_id.clone(),
+ msg,
+ });
+ }
}
- }
- {
- let mut pending_events = self.pending_events.lock().unwrap();
- emit_channel_ready_event!(pending_events, chan.get_mut());
- }
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ emit_channel_ready_event!(pending_events, chan);
+ }
- Ok(())
+ Ok(())
+ } else {
+ try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
+ }
},
- hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ hash_map::Entry::Vacant(_) => {
+ Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ }
}
}
})?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- // TODO(dunxen): Fix this duplication when we switch to a single map with enums as per
- // https://github.com/lightningdevkit/rust-lightning/issues/2422
- if let hash_map::Entry::Occupied(chan_entry) = peer_state.outbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
- log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
- self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
- let mut chan = remove_channel!(self, chan_entry);
- self.finish_force_close_channel(chan.context.force_shutdown(false));
- return Ok(());
- } else if let hash_map::Entry::Occupied(chan_entry) = peer_state.inbound_v1_channel_by_id.entry(msg.channel_id.clone()) {
- log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
- self.issue_channel_close_events(&chan_entry.get().context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
- let mut chan = remove_channel!(self, chan_entry);
- self.finish_force_close_channel(chan.context.force_shutdown(false));
- return Ok(());
- } else if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
- if !chan_entry.get().received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
- &msg.channel_id,
- if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
- }
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+ let phase = chan_phase_entry.get_mut();
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ if !chan.received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ msg.channel_id,
+ if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
- let funding_txo_opt = chan_entry.get().context.get_funding_txo();
- let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
- chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
- dropped_htlcs = htlcs;
-
- if let Some(msg) = shutdown {
- // We can send the `shutdown` message before updating the `ChannelMonitor`
- // here as we don't need the monitor update to complete until we send a
- // `shutdown_signed`, which we'll delay if we're pending a monitor update.
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
+ let funding_txo_opt = chan.context.get_funding_txo();
+ let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self,
+ chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
+ dropped_htlcs = htlcs;
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update_opt {
- break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan_entry).map(|_| ());
+ if let Some(msg) = shutdown {
+ // We can send the `shutdown` message before updating the `ChannelMonitor`
+ // here as we don't need the monitor update to complete until we send a
+ // `shutdown_signed`, which we'll delay if we're pending a monitor update.
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update_opt {
+ break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+ }
+ break Ok(());
+ },
+ ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+ let context = phase.context_mut();
+ log_error!(self.logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
+ self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
+ let mut chan = remove_channel_phase!(self, chan_phase_entry);
+ self.finish_force_close_channel(chan.context_mut().force_shutdown(false));
+ return Ok(());
+ },
}
- break Ok(());
} else {
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
- let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
- if let Some(msg) = closing_signed {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+ if let Some(msg) = closing_signed {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: counterparty_node_id.clone(),
+ msg,
+ });
+ }
+ if tx.is_some() {
+ // We're done with this channel, we've got a signed closing transaction and
+ // will send the closing_signed back to the remote peer upon return. This
+ // also implies there are no pending HTLCs left on the channel, so we can
+ // fully delete it from tracking (the channel monitor is still around to
+ // watch for old state broadcasts)!
+ (tx, Some(remove_channel_phase!(self, chan_phase_entry)))
+ } else { (tx, None) }
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
}
- if tx.is_some() {
- // We're done with this channel, we've got a signed closing transaction and
- // will send the closing_signed back to the remote peer upon return. This
- // also implies there are no pending HTLCs left on the channel, so we can
- // fully delete it from tracking (the channel monitor is still around to
- // watch for old state broadcasts)!
- (tx, Some(remove_channel!(self, chan_entry)))
- } else { (tx, None) }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
}
- if let Some(chan) = chan_option {
+ if let Some(ChannelPhase::Funded(chan)) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
-
- let pending_forward_info = match decoded_hop_res {
- Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
- self.construct_pending_htlc_status(msg, shared_secret, next_hop,
- chan.get().context.config().accept_underpaying_htlcs, next_packet_pk_opt),
- Err(e) => PendingHTLCStatus::Fail(e)
- };
- let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
- // If the update_add is completely bogus, the call will Err and we will close,
- // but if we've sent a shutdown and they haven't acknowledged it yet, we just
- // want to reject the new HTLC and fail it backwards instead of forwarding.
- match pending_forward_info {
- PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
- let reason = if (error_code & 0x1000) != 0 {
- let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
- HTLCFailReason::reason(real_code, error_data)
- } else {
- HTLCFailReason::from_failure_code(error_code)
- }.get_encrypted_failure_packet(incoming_shared_secret, &None);
- let msg = msgs::UpdateFailHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- reason
- };
- PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
- },
- _ => pending_forward_info
- }
- };
- try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan);
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let pending_forward_info = match decoded_hop_res {
+ Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
+ self.construct_pending_htlc_status(msg, shared_secret, next_hop,
+ chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt),
+ Err(e) => PendingHTLCStatus::Fail(e)
+ };
+ let create_pending_htlc_status = |chan: &Channel<SP>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ // If the update_add is completely bogus, the call will Err and we will close,
+ // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+ // want to reject the new HTLC and fail it backwards instead of forwarding.
+ match pending_forward_info {
+ PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
+ let reason = if (error_code & 0x1000) != 0 {
+ let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+ HTLCFailReason::reason(real_code, error_data)
+ } else {
+ HTLCFailReason::from_failure_code(error_code)
+ }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+ let msg = msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason
+ };
+ PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+ },
+ _ => pending_forward_info
+ }
+ };
+ try_chan_phase_entry!(self, chan.update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.fee_estimator, &self.logger), chan_phase_entry);
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
- funding_txo = chan.get().context.get_funding_txo().expect("We won't accept a fulfill until funded");
- res
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let res = try_chan_phase_entry!(self, chan.update_fulfill_htlc(&msg), chan_phase_entry);
+ funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+ res
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ try_chan_phase_entry!(self, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
if (msg.failure_code & 0x8000) == 0 {
let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
- try_chan_entry!(self, Err(chan_err), chan);
+ try_chan_phase_entry!(self, Err(chan_err), chan_phase_entry);
+ }
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ try_chan_phase_entry!(self, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
}
- try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
Ok(())
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- let funding_txo = chan.get().context.get_funding_txo();
- let monitor_update_opt = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
- if let Some(monitor_update) = monitor_update_opt {
- handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
- peer_state, per_peer_state, chan).map(|_| ())
- } else { Ok(()) }
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let funding_txo = chan.context.get_funding_txo();
+ let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
+ if let Some(monitor_update) = monitor_update_opt {
+ handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
+ peer_state, per_peer_state, chan_phase_entry).map(|_| ())
+ } else { Ok(()) }
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}).map(|mtx| mtx.lock().unwrap())?;
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- let funding_txo_opt = chan.get().context.get_funding_txo();
- let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
- self.raa_monitor_updates_held(
- &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
- *counterparty_node_id)
- } else { false };
- let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
- chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan);
- let res = if let Some(monitor_update) = monitor_update_opt {
- let funding_txo = funding_txo_opt
- .expect("Funding outpoint must have been set for RAA handling to succeed");
- handle_new_monitor_update!(self, funding_txo, monitor_update,
- peer_state_lock, peer_state, per_peer_state, chan).map(|_| ())
- } else { Ok(()) };
- (htlcs_to_fail, res)
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ let funding_txo_opt = chan.context.get_funding_txo();
+ let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
+ self.raa_monitor_updates_held(
+ &peer_state.actions_blocking_raa_monitor_updates, funding_txo,
+ *counterparty_node_id)
+ } else { false };
+ let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
+ chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
+ let res = if let Some(monitor_update) = monitor_update_opt {
+ let funding_txo = funding_txo_opt
+ .expect("Funding outpoint must have been set for RAA handling to succeed");
+ handle_new_monitor_update!(self, funding_txo, monitor_update,
+ peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
+ } else { Ok(()) };
+ (htlcs_to_fail, res)
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ try_chan_phase_entry!(self, chan.update_fee(&self.fee_estimator, &msg, &self.logger), chan_phase_entry);
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if !chan.get().context.is_usable() {
- return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
- }
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ if !chan.context.is_usable() {
+ return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
+ }
- peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
- msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
- &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
- msg, &self.default_configuration
- ), chan),
- // Note that announcement_signatures fails if the channel cannot be announced,
- // so get_channel_update_for_broadcast will never fail by the time we get here.
- update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
- });
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+ msg: try_chan_phase_entry!(self, chan.announcement_signatures(
+ &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+ msg, &self.default_configuration
+ ), chan_phase_entry),
+ // Note that announcement_signatures fails if the channel cannot be announced,
+ // so get_channel_update_for_broadcast will never fail by the time we get here.
+ update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
+ });
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
+ }
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(chan_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().context.get_counterparty_node_id() != *counterparty_node_id {
- if chan.get().context.should_announce() {
- // If the announcement is about a channel of ours which is public, some
- // other peer may simply be forwarding all its gossip to us. Don't provide
- // a scary-looking error message and return Ok instead.
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ if chan.context.get_counterparty_node_id() != *counterparty_node_id {
+ if chan.context.should_announce() {
+ // If the announcement is about a channel of ours which is public, some
+ // other peer may simply be forwarding all its gossip to us. Don't provide
+ // a scary-looking error message and return Ok instead.
+ return Ok(NotifyOption::SkipPersist);
+ }
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+ }
+ let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
+ let msg_from_node_one = msg.contents.flags & 1 == 0;
+ if were_node_one == msg_from_node_one {
return Ok(NotifyOption::SkipPersist);
+ } else {
+ log_debug!(self.logger, "Received channel_update for channel {}.", chan_id);
+ try_chan_phase_entry!(self, chan.channel_update(&msg), chan_phase_entry);
}
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
- }
- let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().context.get_counterparty_node_id().serialize()[..];
- let msg_from_node_one = msg.contents.flags & 1 == 0;
- if were_node_one == msg_from_node_one {
- return Ok(NotifyOption::SkipPersist);
} else {
- log_debug!(self.logger, "Received channel_update for channel {}.", &chan_id);
- try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
}
},
hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- // Currently, we expect all holding cell update_adds to be dropped on peer
- // disconnect, so Channel's reestablish will never hand us any holding cell
- // freed HTLCs to fail backwards. If in the future we no longer drop pending
- // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
- let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
- msg, &self.logger, &self.node_signer, self.genesis_hash,
- &self.default_configuration, &*self.best_block.read().unwrap()), chan);
- let mut channel_update = None;
- if let Some(msg) = responses.shutdown_msg {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- } else if chan.get().context.is_usable() {
- // If the channel is in a usable state (ie the channel is not being shut
- // down), send a unicast channel_update to our counterparty to make sure
- // they have the latest channel parameters.
- if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
- channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
- node_id: chan.get().context.get_counterparty_node_id(),
+ hash_map::Entry::Occupied(mut chan_phase_entry) => {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ // Currently, we expect all holding cell update_adds to be dropped on peer
+ // disconnect, so Channel's reestablish will never hand us any holding cell
+ // freed HTLCs to fail backwards. If in the future we no longer drop pending
+ // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+ let responses = try_chan_phase_entry!(self, chan.channel_reestablish(
+ msg, &self.logger, &self.node_signer, self.genesis_hash,
+ &self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
+ let mut channel_update = None;
+ if let Some(msg) = responses.shutdown_msg {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: counterparty_node_id.clone(),
msg,
});
+ } else if chan.context.is_usable() {
+ // If the channel is in a usable state (ie the channel is not being shut
+ // down), send a unicast channel_update to our counterparty to make sure
+ // they have the latest channel parameters.
+ if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
+ channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+ node_id: chan.context.get_counterparty_node_id(),
+ msg,
+ });
+ }
}
+ let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
+ htlc_forwards = self.handle_channel_resumption(
+ &mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
+ Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ if let Some(upd) = channel_update {
+ peer_state.pending_msg_events.push(upd);
+ }
+ need_lnd_workaround
+ } else {
+ return try_chan_phase_entry!(self, Err(ChannelError::Close(
+ "Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
}
- let need_lnd_workaround = chan.get_mut().context.workaround_lnd_bug_4006.take();
- htlc_forwards = self.handle_channel_resumption(
- &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
- Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
- if let Some(upd) = channel_update {
- peer_state.pending_msg_events.push(upd);
- }
- need_lnd_workaround
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
- let mut chan = remove_channel!(self, chan_entry);
- failed_channels.push(chan.context.force_shutdown(false));
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
+ if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
+ if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
+ failed_channels.push(chan.context.force_shutdown(false));
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+ ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+ } else {
+ ClosureReason::CommitmentTxConfirmed
+ };
+ self.issue_channel_close_events(&chan.context, reason);
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
+ },
});
}
- let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
- ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
- } else {
- ClosureReason::CommitmentTxConfirmed
- };
- self.issue_channel_close_events(&chan.context, reason);
- pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: chan.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() }
- },
- });
}
}
}
'chan_loop: loop {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
- for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
+ for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
+ |(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
+ ) {
let counterparty_node_id = chan.context.get_counterparty_node_id();
let funding_txo = chan.context.get_funding_txo();
let (monitor_opt, holding_cell_failed_htlcs) =
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.retain(|channel_id, chan| {
- match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
- Ok((msg_opt, tx_opt)) => {
- if let Some(msg) = msg_opt {
- has_update = true;
- pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: chan.context.get_counterparty_node_id(), msg,
- });
- }
- if let Some(tx) = tx_opt {
- // We're done with this channel. We got a closing_signed and sent back
- // a closing_signed with a closing transaction to broadcast.
- if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
+ peer_state.channel_by_id.retain(|channel_id, phase| {
+ match phase {
+ ChannelPhase::Funded(chan) => {
+ match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
+ Ok((msg_opt, tx_opt)) => {
+ if let Some(msg) = msg_opt {
+ has_update = true;
+ pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: chan.context.get_counterparty_node_id(), msg,
+ });
+ }
+ if let Some(tx) = tx_opt {
+ // We're done with this channel. We got a closing_signed and sent back
+ // a closing_signed with a closing transaction to broadcast.
+ if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
- self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
+ self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
- log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
- self.tx_broadcaster.broadcast_transactions(&[&tx]);
- update_maps_on_chan_removal!(self, &chan.context);
- false
- } else { true }
+ log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
+ self.tx_broadcaster.broadcast_transactions(&[&tx]);
+ update_maps_on_chan_removal!(self, &chan.context);
+ false
+ } else { true }
+ },
+ Err(e) => {
+ has_update = true;
+ let (close_channel, res) = convert_chan_phase_err!(self, e, chan, channel_id, FUNDED_CHANNEL);
+ handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
+ !close_channel
+ }
+ }
},
- Err(e) => {
- has_update = true;
- let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
- handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
- !close_channel
- }
+ _ => true, // Retain unfunded channels if present.
}
});
}
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for chan in peer_state.channel_by_id.values() {
+ for chan in peer_state.channel_by_id.values().filter_map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ) {
for (htlc_source, _) in chan.inflight_htlc_sources() {
if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
inflight_htlcs.process_path(path, self.get_our_node_id());
break;
}
- if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
- debug_assert_eq!(chan.get().context.get_funding_txo().unwrap(), channel_funding_outpoint);
- if let Some((monitor_update, further_update_exists)) = chan.get_mut().unblock_next_blocked_monitor_update() {
- log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
- &channel_funding_outpoint.to_channel_id());
- if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
- peer_state_lck, peer_state, per_peer_state, chan)
- {
- errors.push((e, counterparty_node_id));
- }
- if further_update_exists {
- // If there are more `ChannelMonitorUpdate`s to process, restart at the
- // top of the loop.
- continue;
+ if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) {
+ if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
+ debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
+ if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
+ log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
+ channel_funding_outpoint.to_channel_id());
+ if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+ peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
+ {
+ errors.push((e, counterparty_node_id));
+ }
+ if further_update_exists {
+ // If there are more `ChannelMonitorUpdate`s to process, restart at the
+ // top of the loop.
+ continue;
+ }
+ } else {
+ log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
+ channel_funding_outpoint.to_channel_id());
}
- } else {
- log_trace!(self.logger, "Unlocked monitor updating for channel {} without monitors to update",
- &channel_funding_outpoint.to_channel_id());
}
}
} else {
for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for chan in peer_state.channel_by_id.values() {
+ for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
if let (Some(funding_txo), Some(block_hash)) = (chan.context.get_funding_txo(), chan.context.get_funding_tx_confirmed_in()) {
res.push((funding_txo.txid, Some(block_hash)));
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.retain(|_, channel| {
- let res = f(channel);
- if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
- for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
- let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
- timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
- HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
- }
- if let Some(channel_ready) = channel_ready_opt {
- send_channel_ready!(self, pending_msg_events, channel, channel_ready);
- if channel.context.is_usable() {
- log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", &channel.context.channel_id());
- if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
- pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.context.get_counterparty_node_id(),
- msg,
- });
+ peer_state.channel_by_id.retain(|_, phase| {
+ match phase {
+ // Retain unfunded channels.
+ ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => true,
+ ChannelPhase::Funded(channel) => {
+ let res = f(channel);
+ if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
+ for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
+ let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
+ timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
+ HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
+ }
+ if let Some(channel_ready) = channel_ready_opt {
+ send_channel_ready!(self, pending_msg_events, channel, channel_ready);
+ if channel.context.is_usable() {
+ log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
+ if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+ pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg,
+ });
+ }
+ } else {
+ log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
+ }
}
- } else {
- log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", &channel.context.channel_id());
- }
- }
- {
- let mut pending_events = self.pending_events.lock().unwrap();
- emit_channel_ready_event!(pending_events, channel);
- }
+ {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ emit_channel_ready_event!(pending_events, channel);
+ }
- if let Some(announcement_sigs) = announcement_sigs {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", &channel.context.channel_id());
- pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: channel.context.get_counterparty_node_id(),
- msg: announcement_sigs,
- });
- if let Some(height) = height_opt {
- if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
- msg: announcement,
- // Note that announcement_signatures fails if the channel cannot be announced,
- // so get_channel_update_for_broadcast will never fail by the time we get here.
- update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
+ if let Some(announcement_sigs) = announcement_sigs {
+ log_trace!(self.logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg: announcement_sigs,
});
+ if let Some(height) = height_opt {
+ if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+ msg: announcement,
+ // Note that announcement_signatures fails if the channel cannot be announced,
+ // so get_channel_update_for_broadcast will never fail by the time we get here.
+ update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
+ });
+ }
+ }
}
+ if channel.is_our_channel_ready() {
+ if let Some(real_scid) = channel.context.get_short_channel_id() {
+ // If we sent a 0conf channel_ready, and now have an SCID, we add it
+ // to the short_to_chan_info map here. Note that we check whether we
+ // can relay using the real SCID at relay-time (i.e.
+ // enforce option_scid_alias then), and if the funding tx is ever
+ // un-confirmed we force-close the channel, ensuring short_to_chan_info
+ // is always consistent.
+ let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
+ let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
+ assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
+ "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
+ fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
+ }
+ }
+ } else if let Err(reason) = res {
+ update_maps_on_chan_removal!(self, &channel.context);
+ // It looks like our counterparty went on-chain or funding transaction was
+ // reorged out of the main chain. Close the channel.
+ failed_channels.push(channel.context.force_shutdown(true));
+ if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ let reason_message = format!("{}", reason);
+ self.issue_channel_close_events(&channel.context, reason);
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
+ channel_id: channel.context.channel_id(),
+ data: reason_message,
+ } },
+ });
+ return false;
}
+ true
}
- if channel.is_our_channel_ready() {
- if let Some(real_scid) = channel.context.get_short_channel_id() {
- // If we sent a 0conf channel_ready, and now have an SCID, we add it
- // to the short_to_chan_info map here. Note that we check whether we
- // can relay using the real SCID at relay-time (i.e.
- // enforce option_scid_alias then), and if the funding tx is ever
- // un-confirmed we force-close the channel, ensuring short_to_chan_info
- // is always consistent.
- let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
- let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
- assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
- "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
- fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
- }
- }
- } else if let Err(reason) = res {
- update_maps_on_chan_removal!(self, &channel.context);
- // It looks like our counterparty went on-chain or funding transaction was
- // reorged out of the main chain. Close the channel.
- failed_channels.push(channel.context.force_shutdown(true));
- if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
- }
- let reason_message = format!("{}", reason);
- self.issue_channel_close_events(&channel.context, reason);
- pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
- channel_id: channel.context.channel_id(),
- data: reason_message,
- } },
- });
- return false;
}
- true
});
}
}
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- peer_state.channel_by_id.retain(|_, chan| {
- chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- if chan.is_shutdown() {
- update_maps_on_chan_removal!(self, &chan.context);
- self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
- return false;
- }
- true
- });
- peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
- update_maps_on_chan_removal!(self, &chan.context);
- self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
- false
- });
- peer_state.outbound_v1_channel_by_id.retain(|_, chan| {
- update_maps_on_chan_removal!(self, &chan.context);
- self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
+ peer_state.channel_by_id.retain(|_, phase| {
+ let context = match phase {
+ ChannelPhase::Funded(chan) => {
+ chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+ // We only retain funded channels that are not shutdown.
+ if !chan.is_shutdown() {
+ return true;
+ }
+ &chan.context
+ },
+ // Unfunded channels will always be removed.
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ &chan.context
+ },
+ ChannelPhase::UnfundedInboundV1(chan) => {
+ &chan.context
+ },
+ };
+ // Clean up for removal.
+ update_maps_on_chan_removal!(self, &context);
+ self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
false
});
// Note that we don't bother generating any events for pre-accept channels -
}
e.insert(Mutex::new(PeerState {
channel_by_id: HashMap::new(),
- outbound_v1_channel_by_id: HashMap::new(),
- inbound_v1_channel_by_id: HashMap::new(),
inbound_channel_request_by_id: HashMap::new(),
latest_features: init_msg.features.clone(),
pending_msg_events: Vec::new(),
let peer_state = &mut *peer_state_lock;
let pending_msg_events = &mut peer_state.pending_msg_events;
- // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
- // (so won't be recovered after a crash) we don't need to bother closing unfunded channels and
- // clearing their maps here. Instead we can just send queue channel_reestablish messages for
- // channels in the channel_by_id map.
- peer_state.channel_by_id.iter_mut().for_each(|(_, chan)| {
+ peer_state.channel_by_id.iter_mut().filter_map(|(_, phase)|
+ if let ChannelPhase::Funded(chan) = phase { Some(chan) } else {
+ // Since unfunded channel maps are cleared upon disconnecting a peer, and they're not persisted
+ // (so won't be recovered after a crash), they shouldn't exist here and we would never need to
+ // worry about closing and removing them.
+ debug_assert!(false);
+ None
+ }
+ ).for_each(|chan| {
pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
node_id: chan.context.get_counterparty_node_id(),
msg: chan.get_channel_reestablish(&self.logger),
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
- if let Some(chan) = peer_state.channel_by_id.get(&msg.channel_id) {
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
if let Some(msg) = chan.get_outbound_shutdown() {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: *counterparty_node_id,
// Note that we don't bother generating any events for pre-accept channels -
// they're not considered "channels" yet from the PoV of our events interface.
peer_state.inbound_channel_request_by_id.clear();
- peer_state.channel_by_id.keys().cloned()
- .chain(peer_state.outbound_v1_channel_by_id.keys().cloned())
- .chain(peer_state.inbound_v1_channel_by_id.keys().cloned()).collect()
+ peer_state.channel_by_id.keys().cloned().collect()
};
for channel_id in channel_ids {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
if peer_state_mutex_opt.is_none() { return; }
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(chan) = peer_state.outbound_v1_channel_by_id.get_mut(&msg.channel_id) {
+ if let Some(ChannelPhase::UnfundedOutboundV1(chan)) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash, &self.fee_estimator) {
peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
node_id: *counterparty_node_id,
let mut serializable_peer_count: u64 = 0;
{
let per_peer_state = self.per_peer_state.read().unwrap();
- let mut unfunded_channels = 0;
- let mut number_of_channels = 0;
+ let mut number_of_funded_channels = 0;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
if !peer_state.ok_to_remove(false) {
serializable_peer_count += 1;
}
- number_of_channels += peer_state.channel_by_id.len();
- for (_, channel) in peer_state.channel_by_id.iter() {
- if !channel.context.is_funding_initiated() {
- unfunded_channels += 1;
- }
- }
+
+ number_of_funded_channels += peer_state.channel_by_id.iter().filter(
+ |(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_initiated() } else { false }
+ ).count();
}
- ((number_of_channels - unfunded_channels) as u64).write(writer)?;
+ (number_of_funded_channels as u64).write(writer)?;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for (_, channel) in peer_state.channel_by_id.iter() {
- if channel.context.is_funding_initiated() {
- channel.write(writer)?;
- }
+ for channel in peer_state.channel_by_id.iter().filter_map(
+ |(_, phase)| if let ChannelPhase::Funded(channel) = phase {
+ if channel.context.is_funding_initiated() { Some(channel) } else { None }
+ } else { None }
+ ) {
+ channel.write(writer)?;
}
}
}
session_priv.write(writer)?;
}
}
+ PendingOutboundPayment::AwaitingInvoice { .. } => {},
+ PendingOutboundPayment::InvoiceReceived { .. } => {},
PendingOutboundPayment::Fulfilled { .. } => {},
PendingOutboundPayment::Abandoned { .. } => {},
}
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
- let mut peer_channels: HashMap<PublicKey, HashMap<ChannelId, Channel<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut funded_peer_channels: HashMap<PublicKey, HashMap<ChannelId, ChannelPhase<SP>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = VecDeque::new();
if channel.context.is_funding_initiated() {
id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
}
- match peer_channels.entry(channel.context.get_counterparty_node_id()) {
+ match funded_peer_channels.entry(channel.context.get_counterparty_node_id()) {
hash_map::Entry::Occupied(mut entry) => {
let by_id_map = entry.get_mut();
- by_id_map.insert(channel.context.channel_id(), channel);
+ by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
},
hash_map::Entry::Vacant(entry) => {
let mut by_id_map = HashMap::new();
- by_id_map.insert(channel.context.channel_id(), channel);
+ by_id_map.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
entry.insert(by_id_map);
}
}
let peer_state_from_chans = |channel_by_id| {
PeerState {
channel_by_id,
- outbound_v1_channel_by_id: HashMap::new(),
- inbound_v1_channel_by_id: HashMap::new(),
inbound_channel_request_by_id: HashMap::new(),
latest_features: InitFeatures::empty(),
pending_msg_events: Vec::new(),
let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
for _ in 0..peer_count {
let peer_pubkey = Readable::read(reader)?;
- let peer_chans = peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
+ let peer_chans = funded_peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new());
let mut peer_state = peer_state_from_chans(peer_chans);
peer_state.latest_features = Readable::read(reader)?;
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mtx.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for (_, chan) in peer_state.channel_by_id.iter() {
- // Channels that were persisted have to be funded, otherwise they should have been
- // discarded.
- let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
- let monitor = args.channel_monitors.get(&funding_txo)
- .expect("We already checked for monitor presence when loading channels");
- let mut max_in_flight_update_id = monitor.get_latest_update_id();
- if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
- if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
- max_in_flight_update_id = cmp::max(max_in_flight_update_id,
- handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
- funding_txo, monitor, peer_state, ""));
+ for phase in peer_state.channel_by_id.values() {
+ if let ChannelPhase::Funded(chan) = phase {
+ // Channels that were persisted have to be funded, otherwise they should have been
+ // discarded.
+ let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ let monitor = args.channel_monitors.get(&funding_txo)
+ .expect("We already checked for monitor presence when loading channels");
+ let mut max_in_flight_update_id = monitor.get_latest_update_id();
+ if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
+ if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
+ max_in_flight_update_id = cmp::max(max_in_flight_update_id,
+ handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
+ funding_txo, monitor, peer_state, ""));
+ }
}
- }
- if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
- // If the channel is ahead of the monitor, return InvalidValue:
- log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
- log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
- &chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
- log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
- log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
- log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
- log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
- log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
+ // If the channel is ahead of the monitor, return InvalidValue:
+ log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
+ log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
+ chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
+ log_error!(args.logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
+ log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
+ log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
+ log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
+ log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
+ return Err(DecodeError::InvalidValue);
+ }
+ } else {
+ // We shouldn't have persisted (or read) any unfunded channel types so none should have been
+ // created in this `channel_by_id` map.
+ debug_assert!(false);
return Err(DecodeError::InvalidValue);
}
}
for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
- if chan.context.outbound_scid_alias() == 0 {
- let mut outbound_scid_alias;
- loop {
- outbound_scid_alias = fake_scid::Namespace::OutboundAlias
- .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
- if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
- }
- chan.context.set_outbound_scid_alias(outbound_scid_alias);
- } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
- // Note that in rare cases its possible to hit this while reading an older
- // channel if we just happened to pick a colliding outbound alias above.
- log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
- return Err(DecodeError::InvalidValue);
- }
- if chan.context.is_usable() {
- if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
+ for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
+ if let ChannelPhase::Funded(chan) = phase {
+ if chan.context.outbound_scid_alias() == 0 {
+ let mut outbound_scid_alias;
+ loop {
+ outbound_scid_alias = fake_scid::Namespace::OutboundAlias
+ .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
+ if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
+ }
+ chan.context.set_outbound_scid_alias(outbound_scid_alias);
+ } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
// Note that in rare cases its possible to hit this while reading an older
// channel if we just happened to pick a colliding outbound alias above.
log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
return Err(DecodeError::InvalidValue);
}
+ if chan.context.is_usable() {
+ if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
+ // Note that in rare cases its possible to hit this while reading an older
+ // channel if we just happened to pick a colliding outbound alias above.
+ log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+ } else {
+ // We shouldn't have persisted (or read) any unfunded channel types so none should have been
+ // created in this `channel_by_id` map.
+ debug_assert!(false);
+ return Err(DecodeError::InvalidValue);
}
}
}
let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(channel) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
+ if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
}
}
}
}
-#[cfg(test)]
-macro_rules! get_outbound_v1_channel_ref {
- ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
- {
- $per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
- $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
- $peer_state_lock.outbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
- }
- }
-}
-
-#[cfg(test)]
-macro_rules! get_inbound_v1_channel_ref {
- ($node: expr, $counterparty_node: expr, $per_peer_state_lock: ident, $peer_state_lock: ident, $channel_id: expr) => {
- {
- $per_peer_state_lock = $node.node.per_peer_state.read().unwrap();
- $peer_state_lock = $per_peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
- $peer_state_lock.inbound_v1_channel_by_id.get_mut(&$channel_id).unwrap()
- }
- }
-}
-
#[cfg(test)]
macro_rules! get_feerate {
($node: expr, $counterparty_node: expr, $channel_id: expr) => {
{
let mut per_peer_state_lock;
let mut peer_state_lock;
- let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
- chan.context.get_feerate_sat_per_1000_weight()
+ let phase = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
+ phase.context().get_feerate_sat_per_1000_weight()
}
}
}
let mut per_peer_state_lock;
let mut peer_state_lock;
let chan = get_channel_ref!($node, $counterparty_node, per_peer_state_lock, peer_state_lock, $channel_id);
- chan.context.get_channel_type().clone()
+ chan.context().get_channel_type().clone()
}
}
}
let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
.unwrap().lock().unwrap();
let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
- if let Some(prev_config) = channel.context.prev_config() {
+ if let Some(prev_config) = channel.context().prev_config() {
prev_config.forwarding_fee_base_msat
} else {
- channel.context.config().forwarding_fee_base_msat
+ channel.context().config().forwarding_fee_base_msat
}
};
if $idx == 1 { fee += expected_extra_fees[i]; }
($node: expr, $counterparty_node: expr, $channel_id: expr) => {{
let peer_state_lock = $node.node.per_peer_state.read().unwrap();
let chan_lock = peer_state_lock.get(&$counterparty_node.node.get_our_node_id()).unwrap().lock().unwrap();
- let chan = chan_lock.channel_by_id.get(&$channel_id).unwrap();
+ let chan = chan_lock.channel_by_id.get(&$channel_id).map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).flatten().unwrap();
chan.get_value_stat()
}}
}
use crate::sign::{ChannelSigner, EcdsaChannelSigner, EntropySource, SignerProvider};
use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason};
use crate::ln::{ChannelId, PaymentPreimage, PaymentSecret, PaymentHash};
-use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY};
+use crate::ln::channel::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase};
use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA};
use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError};
use crate::ln::{chan_utils, onion_utils};
let counterparty_node = if send_from_initiator { &nodes[0] } else { &nodes[1] };
let mut sender_node_per_peer_lock;
let mut sender_node_peer_state_lock;
- if send_from_initiator {
- let chan = get_inbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
- chan.context.holder_selected_channel_reserve_satoshis = 0;
- chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
- } else {
- let chan = get_outbound_v1_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
- chan.context.holder_selected_channel_reserve_satoshis = 0;
- chan.context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+
+ let channel_phase = get_channel_ref!(sender_node, counterparty_node, sender_node_per_peer_lock, sender_node_peer_state_lock, temp_channel_id);
+ match channel_phase {
+ ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
+ let chan_context = channel_phase.context_mut();
+ chan_context.holder_selected_channel_reserve_satoshis = 0;
+ chan_context.holder_max_htlc_value_in_flight_msat = 100_000_000;
+ },
+ ChannelPhase::Funded(_) => assert!(false),
}
}
let (local_revocation_basepoint, local_htlc_basepoint, local_funding) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).flatten().unwrap();
let chan_signer = local_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point, remote_funding) = {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
- let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).flatten().unwrap();
let chan_signer = remote_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
let res = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).flatten().unwrap();
let local_chan_signer = local_chan.get_signer();
let mut htlcs: Vec<(HTLCOutputInCommitment, ())> = vec![];
let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point, local_funding) = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let local_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let local_chan = chan_lock.channel_by_id.get(&chan.2).map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).flatten().unwrap();
let chan_signer = local_chan.get_signer();
// Make the signer believe we validated another commitment, so we can release the secret
chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1;
let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_point, remote_funding) = {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
- let remote_chan = chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let remote_chan = chan_lock.channel_by_id.get(&chan.2).map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).flatten().unwrap();
let chan_signer = remote_chan.get_signer();
let pubkeys = chan_signer.as_ref().pubkeys();
(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
let res = {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let local_chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let local_chan = local_chan_lock.channel_by_id.get(&chan.2).unwrap();
+ let local_chan = local_chan_lock.channel_by_id.get(&chan.2).map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).flatten().unwrap();
let local_chan_signer = local_chan.get_signer();
let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
commitment_number,
// The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
// well, so HTLCs at exactly the dust limit will not be included in commitment txn.
nodes[2].node.per_peer_state.read().unwrap().get(&nodes[1].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context.holder_dust_limit_satoshis * 1000
+ .unwrap().lock().unwrap().channel_by_id.get(&chan_2.2).unwrap().context().holder_dust_limit_satoshis * 1000
} else { 3000000 };
let (_, first_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
let ds_dust_limit = nodes[3].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context.holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan_2_3.2).unwrap().context().holder_dust_limit_satoshis;
// 0th HTLC:
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 1st HTLC:
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0);
let max_accepted_htlcs = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.counterparty_max_accepted_htlcs as u64;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().counterparty_max_accepted_htlcs as u64;
// Fetch a route in advance as we will be unable to once we're unable to send.
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000);
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let channel = chan_lock.channel_by_id.get(&chan.2).unwrap();
- htlc_minimum_msat = channel.context.get_holder_htlc_minimum_msat();
+ htlc_minimum_msat = channel.context().get_holder_htlc_minimum_msat();
}
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], htlc_minimum_msat);
let chan =create_announced_chan_between_nodes(&nodes, 0, 1);
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
// We route 2 dust-HTLCs between A and B
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
let bs_dust_limit = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[0].node.get_our_node_id())
- .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context.holder_dust_limit_satoshis;
+ .unwrap().lock().unwrap().channel_by_id.get(&chan.2).unwrap().context().holder_dust_limit_satoshis;
let (_payment_preimage_1, dust_hash, _payment_secret_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
let (_payment_preimage_2, non_dust_hash, _payment_secret_2) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
{
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let mut guard = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- let keys = guard.channel_by_id.get_mut(&channel_id).unwrap().get_signer();
+ let keys = guard.channel_by_id.get_mut(&channel_id).map(
+ |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
+ ).flatten().unwrap().get_signer();
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
- if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
- assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
- } else { assert!(false); }
+ if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+ if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+ } else { assert!(false); }
+ } else {
+ assert!(false);
+ }
}
// Our local monitor is in-sync and hasn't processed yet timeout
check_added_monitors!(nodes[0], 1);
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut channel = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2);
- if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
- // Watchtower Alice should already have seen the block and reject the update
- assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
- assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
- assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
- } else { assert!(false); }
+ if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
+ if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
+ // Watchtower Alice should already have seen the block and reject the update
+ assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::PermanentFailure);
+ assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+ assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+ } else { assert!(false); }
+ } else {
+ assert!(false);
+ }
}
// Our local monitor is in-sync and hasn't processed yet timeout
check_added_monitors!(nodes[0], 1);
// another channel in the ChannelManager - an invalid state. Thus, we'd panic later when we
// try to create another channel. Instead, we drop the channel entirely here (leaving the
// channelmanager in a possibly nonsense state instead).
- let mut as_chan = a_peer_state.outbound_v1_channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap();
- let logger = test_utils::TestLogger::new();
- as_chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+ match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() {
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ let logger = test_utils::TestLogger::new();
+ chan.get_funding_created(tx.clone(), funding_outpoint, &&logger).map_err(|_| ()).unwrap()
+ },
+ _ => panic!("Unexpected ChannelPhase variant"),
+ }
};
check_added_monitors!(nodes[0], 0);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created);
if on_holder_tx {
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- let mut chan = get_outbound_v1_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id);
- chan.context.holder_dust_limit_satoshis = 546;
+ match get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, temporary_channel_id) {
+ ChannelPhase::UnfundedOutboundV1(chan) => {
+ chan.context.holder_dust_limit_satoshis = 546;
+ },
+ _ => panic!("Unexpected ChannelPhase variant"),
+ }
}
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
let chan = chan_lock.channel_by_id.get(&channel_id).unwrap();
- (chan.context.get_dust_buffer_feerate(None) as u64,
- chan.context.get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
+ (chan.context().get_dust_buffer_feerate(None) as u64,
+ chan.context().get_max_dust_htlc_exposure_msat(&LowerBoundedFeeEstimator(nodes[0].fee_estimator)))
};
let dust_outbound_htlc_on_holder_tx_msat: u64 = (dust_buffer_feerate * htlc_timeout_tx_weight(&channel_type_features) / 1000 + open_channel.dust_limit_satoshis - 1) * 1000;
let dust_outbound_htlc_on_holder_tx: u64 = max_dust_htlc_exposure_msat / dust_outbound_htlc_on_holder_tx_msat;
let check_outbound_channel_existence = |should_exist: bool| {
let per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap();
- assert_eq!(chan_lock.outbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+ assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
};
// Channel should exist without any timer ticks.
let check_inbound_channel_existence = |should_exist: bool| {
let per_peer_state = nodes[1].node.per_peer_state.read().unwrap();
let chan_lock = per_peer_state.get(&nodes[0].node.get_our_node_id()).unwrap().lock().unwrap();
- assert_eq!(chan_lock.inbound_v1_channel_by_id.contains_key(&temp_channel_id), should_exist);
+ assert_eq!(chan_lock.channel_by_id.contains_key(&temp_channel_id), should_exist);
};
// Channel should exist without any timer ticks.
/// message. A node can decide to use that information to discover a potential update to its
/// public IPv4 address (NAT) and use that for a [`NodeAnnouncement`] update message containing
/// the new address.
- pub remote_network_address: Option<NetAddress>,
+ pub remote_network_address: Option<SocketAddress>,
}
/// An [`error`] message to be sent to or received from a peer.
/// An address which can be used to connect to a remote peer.
#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum NetAddress {
- /// An IPv4 address/port on which the peer is listening.
- IPv4 {
+pub enum SocketAddress {
+ /// An IPv4 address and port on which the peer is listening.
+ TcpIpV4 {
/// The 4-byte IPv4 address
addr: [u8; 4],
/// The port on which the node is listening
port: u16,
},
- /// An IPv6 address/port on which the peer is listening.
- IPv6 {
+ /// An IPv6 address and port on which the peer is listening.
+ TcpIpV6 {
/// The 16-byte IPv6 address
addr: [u8; 16],
/// The port on which the node is listening
port: u16,
},
}
-impl NetAddress {
+impl SocketAddress {
/// Gets the ID of this address type. Addresses in [`NodeAnnouncement`] messages should be sorted
/// by this.
pub(crate) fn get_id(&self) -> u8 {
match self {
- &NetAddress::IPv4 {..} => { 1 },
- &NetAddress::IPv6 {..} => { 2 },
- &NetAddress::OnionV2(_) => { 3 },
- &NetAddress::OnionV3 {..} => { 4 },
- &NetAddress::Hostname {..} => { 5 },
+ &SocketAddress::TcpIpV4 {..} => { 1 },
+ &SocketAddress::TcpIpV6 {..} => { 2 },
+ &SocketAddress::OnionV2(_) => { 3 },
+ &SocketAddress::OnionV3 {..} => { 4 },
+ &SocketAddress::Hostname {..} => { 5 },
}
}
/// Strict byte-length of address descriptor, 1-byte type not recorded
fn len(&self) -> u16 {
match self {
- &NetAddress::IPv4 { .. } => { 6 },
- &NetAddress::IPv6 { .. } => { 18 },
- &NetAddress::OnionV2(_) => { 12 },
- &NetAddress::OnionV3 { .. } => { 37 },
+ &SocketAddress::TcpIpV4 { .. } => { 6 },
+ &SocketAddress::TcpIpV6 { .. } => { 18 },
+ &SocketAddress::OnionV2(_) => { 12 },
+ &SocketAddress::OnionV3 { .. } => { 37 },
// Consists of 1-byte hostname length, hostname bytes, and 2-byte port.
- &NetAddress::Hostname { ref hostname, .. } => { u16::from(hostname.len()) + 3 },
+ &SocketAddress::Hostname { ref hostname, .. } => { u16::from(hostname.len()) + 3 },
}
}
pub(crate) const MAX_LEN: u16 = 258;
}
-impl Writeable for NetAddress {
+impl Writeable for SocketAddress {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
match self {
- &NetAddress::IPv4 { ref addr, ref port } => {
+ &SocketAddress::TcpIpV4 { ref addr, ref port } => {
1u8.write(writer)?;
addr.write(writer)?;
port.write(writer)?;
},
- &NetAddress::IPv6 { ref addr, ref port } => {
+ &SocketAddress::TcpIpV6 { ref addr, ref port } => {
2u8.write(writer)?;
addr.write(writer)?;
port.write(writer)?;
},
- &NetAddress::OnionV2(bytes) => {
+ &SocketAddress::OnionV2(bytes) => {
3u8.write(writer)?;
bytes.write(writer)?;
},
- &NetAddress::OnionV3 { ref ed25519_pubkey, ref checksum, ref version, ref port } => {
+ &SocketAddress::OnionV3 { ref ed25519_pubkey, ref checksum, ref version, ref port } => {
4u8.write(writer)?;
ed25519_pubkey.write(writer)?;
checksum.write(writer)?;
version.write(writer)?;
port.write(writer)?;
},
- &NetAddress::Hostname { ref hostname, ref port } => {
+ &SocketAddress::Hostname { ref hostname, ref port } => {
5u8.write(writer)?;
hostname.write(writer)?;
port.write(writer)?;
}
}
-impl Readable for Result<NetAddress, u8> {
- fn read<R: Read>(reader: &mut R) -> Result<Result<NetAddress, u8>, DecodeError> {
+impl Readable for Result<SocketAddress, u8> {
+ fn read<R: Read>(reader: &mut R) -> Result<Result<SocketAddress, u8>, DecodeError> {
let byte = <u8 as Readable>::read(reader)?;
match byte {
1 => {
- Ok(Ok(NetAddress::IPv4 {
+ Ok(Ok(SocketAddress::TcpIpV4 {
addr: Readable::read(reader)?,
port: Readable::read(reader)?,
}))
},
2 => {
- Ok(Ok(NetAddress::IPv6 {
+ Ok(Ok(SocketAddress::TcpIpV6 {
addr: Readable::read(reader)?,
port: Readable::read(reader)?,
}))
},
- 3 => Ok(Ok(NetAddress::OnionV2(Readable::read(reader)?))),
+ 3 => Ok(Ok(SocketAddress::OnionV2(Readable::read(reader)?))),
4 => {
- Ok(Ok(NetAddress::OnionV3 {
+ Ok(Ok(SocketAddress::OnionV3 {
ed25519_pubkey: Readable::read(reader)?,
checksum: Readable::read(reader)?,
version: Readable::read(reader)?,
}))
},
5 => {
- Ok(Ok(NetAddress::Hostname {
+ Ok(Ok(SocketAddress::Hostname {
hostname: Readable::read(reader)?,
port: Readable::read(reader)?,
}))
}
}
-impl Readable for NetAddress {
- fn read<R: Read>(reader: &mut R) -> Result<NetAddress, DecodeError> {
+impl Readable for SocketAddress {
+ fn read<R: Read>(reader: &mut R) -> Result<SocketAddress, DecodeError> {
match Readable::read(reader) {
Ok(Ok(res)) => Ok(res),
Ok(Err(_)) => Err(DecodeError::UnknownVersion),
}
}
-/// [`NetAddress`] error variants
+/// [`SocketAddress`] error variants
#[derive(Debug, Eq, PartialEq, Clone)]
-pub enum NetAddressParseError {
+pub enum SocketAddressParseError {
/// Socket address (IPv4/IPv6) parsing error
SocketAddrParse,
/// Invalid input format
InvalidOnionV3,
}
-impl fmt::Display for NetAddressParseError {
+impl fmt::Display for SocketAddressParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
- NetAddressParseError::SocketAddrParse => write!(f, "Socket address (IPv4/IPv6) parsing error"),
- NetAddressParseError::InvalidInput => write!(f, "Invalid input format. \
+ SocketAddressParseError::SocketAddrParse => write!(f, "Socket address (IPv4/IPv6) parsing error"),
+ SocketAddressParseError::InvalidInput => write!(f, "Invalid input format. \
Expected: \"<ipv4>:<port>\", \"[<ipv6>]:<port>\", \"<onion address>.onion:<port>\" or \"<hostname>:<port>\""),
- NetAddressParseError::InvalidPort => write!(f, "Invalid port"),
- NetAddressParseError::InvalidOnionV3 => write!(f, "Invalid onion v3 address"),
+ SocketAddressParseError::InvalidPort => write!(f, "Invalid port"),
+ SocketAddressParseError::InvalidOnionV3 => write!(f, "Invalid onion v3 address"),
}
}
}
#[cfg(feature = "std")]
-impl From<std::net::SocketAddrV4> for NetAddress {
+impl From<std::net::SocketAddrV4> for SocketAddress {
fn from(addr: std::net::SocketAddrV4) -> Self {
- NetAddress::IPv4 { addr: addr.ip().octets(), port: addr.port() }
+ SocketAddress::TcpIpV4 { addr: addr.ip().octets(), port: addr.port() }
}
}
#[cfg(feature = "std")]
-impl From<std::net::SocketAddrV6> for NetAddress {
+impl From<std::net::SocketAddrV6> for SocketAddress {
fn from(addr: std::net::SocketAddrV6) -> Self {
- NetAddress::IPv6 { addr: addr.ip().octets(), port: addr.port() }
+ SocketAddress::TcpIpV6 { addr: addr.ip().octets(), port: addr.port() }
}
}
#[cfg(feature = "std")]
-impl From<std::net::SocketAddr> for NetAddress {
+impl From<std::net::SocketAddr> for SocketAddress {
fn from(addr: std::net::SocketAddr) -> Self {
match addr {
std::net::SocketAddr::V4(addr) => addr.into(),
}
}
-fn parse_onion_address(host: &str, port: u16) -> Result<NetAddress, NetAddressParseError> {
+fn parse_onion_address(host: &str, port: u16) -> Result<SocketAddress, SocketAddressParseError> {
if host.ends_with(".onion") {
let domain = &host[..host.len() - ".onion".len()];
if domain.len() != 56 {
- return Err(NetAddressParseError::InvalidOnionV3);
+ return Err(SocketAddressParseError::InvalidOnionV3);
}
- let onion = base32::Alphabet::RFC4648 { padding: false }.decode(&domain).map_err(|_| NetAddressParseError::InvalidOnionV3)?;
+ let onion = base32::Alphabet::RFC4648 { padding: false }.decode(&domain).map_err(|_| SocketAddressParseError::InvalidOnionV3)?;
if onion.len() != 35 {
- return Err(NetAddressParseError::InvalidOnionV3);
+ return Err(SocketAddressParseError::InvalidOnionV3);
}
let version = onion[0];
let first_checksum_flag = onion[1];
let mut ed25519_pubkey = [0; 32];
ed25519_pubkey.copy_from_slice(&onion[3..35]);
let checksum = u16::from_be_bytes([first_checksum_flag, second_checksum_flag]);
- return Ok(NetAddress::OnionV3 { ed25519_pubkey, checksum, version, port });
+ return Ok(SocketAddress::OnionV3 { ed25519_pubkey, checksum, version, port });
} else {
- return Err(NetAddressParseError::InvalidInput);
+ return Err(SocketAddressParseError::InvalidInput);
}
}
#[cfg(feature = "std")]
-impl FromStr for NetAddress {
- type Err = NetAddressParseError;
+impl FromStr for SocketAddress {
+ type Err = SocketAddressParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match std::net::SocketAddr::from_str(s) {
Err(_) => {
let trimmed_input = match s.rfind(":") {
Some(pos) => pos,
- None => return Err(NetAddressParseError::InvalidInput),
+ None => return Err(SocketAddressParseError::InvalidInput),
};
let host = &s[..trimmed_input];
- let port: u16 = s[trimmed_input + 1..].parse().map_err(|_| NetAddressParseError::InvalidPort)?;
+ let port: u16 = s[trimmed_input + 1..].parse().map_err(|_| SocketAddressParseError::InvalidPort)?;
if host.ends_with(".onion") {
return parse_onion_address(host, port);
};
if let Ok(hostname) = Hostname::try_from(s[..trimmed_input].to_string()) {
- return Ok(NetAddress::Hostname { hostname, port });
+ return Ok(SocketAddress::Hostname { hostname, port });
};
- return Err(NetAddressParseError::SocketAddrParse)
+ return Err(SocketAddressParseError::SocketAddrParse)
},
}
}
/// This should be sanitized before use. There is no guarantee of uniqueness.
pub alias: NodeAlias,
/// List of addresses on which this node is reachable
- pub addresses: Vec<NetAddress>,
+ pub addresses: Vec<SocketAddress>,
pub(crate) excess_address_data: Vec<u8>,
pub(crate) excess_data: Vec<u8>,
}
fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
let global_features: InitFeatures = Readable::read(r)?;
let features: InitFeatures = Readable::read(r)?;
- let mut remote_network_address: Option<NetAddress> = None;
+ let mut remote_network_address: Option<SocketAddress> = None;
let mut networks: Option<WithoutLength<Vec<ChainHash>>> = None;
decode_tlv_stream!(r, {
(1, networks, option),
let alias: NodeAlias = Readable::read(r)?;
let addr_len: u16 = Readable::read(r)?;
- let mut addresses: Vec<NetAddress> = Vec::new();
+ let mut addresses: Vec<SocketAddress> = Vec::new();
let mut addr_readpos = 0;
let mut excess = false;
let mut excess_byte = 0;
use crate::ln::ChannelId;
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket};
- use crate::ln::msgs::NetAddress;
+ use crate::ln::msgs::SocketAddress;
use crate::routing::gossip::{NodeAlias, NodeId};
use crate::util::ser::{Writeable, Readable, Hostname, TransactionU16LenLimited};
#[cfg(feature = "std")]
use std::net::{Ipv4Addr, Ipv6Addr};
- use crate::ln::msgs::NetAddressParseError;
+ use crate::ln::msgs::SocketAddressParseError;
#[test]
fn encoding_channel_reestablish() {
};
let mut addresses = Vec::new();
if ipv4 {
- addresses.push(NetAddress::IPv4 {
+ addresses.push(SocketAddress::TcpIpV4 {
addr: [255, 254, 253, 252],
port: 9735
});
}
if ipv6 {
- addresses.push(NetAddress::IPv6 {
+ addresses.push(SocketAddress::TcpIpV6 {
addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
port: 9735
});
}
if onionv2 {
- addresses.push(NetAddress::OnionV2(
+ addresses.push(msgs::SocketAddress::OnionV2(
[255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]
));
}
if onionv3 {
- addresses.push(NetAddress::OnionV3 {
+ addresses.push(msgs::SocketAddress::OnionV3 {
ed25519_pubkey: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224],
checksum: 32,
version: 16,
});
}
if hostname {
- addresses.push(NetAddress::Hostname {
+ addresses.push(SocketAddress::Hostname {
hostname: Hostname::try_from(String::from("host")).unwrap(),
port: 9735,
});
}.encode(), hex::decode("00000000014001010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap());
let init_msg = msgs::Init { features: InitFeatures::from_le_bytes(vec![]),
networks: Some(vec![mainnet_hash]),
- remote_network_address: Some(NetAddress::IPv4 {
+ remote_network_address: Some(SocketAddress::TcpIpV4 {
addr: [127, 0, 0, 1],
port: 1000,
}),
#[test]
#[cfg(feature = "std")]
- fn test_net_address_from_str() {
- assert_eq!(NetAddress::IPv4 {
+ fn test_socket_address_from_str() {
+ assert_eq!(SocketAddress::TcpIpV4 {
addr: Ipv4Addr::new(127, 0, 0, 1).octets(),
port: 1234,
- }, NetAddress::from_str("127.0.0.1:1234").unwrap());
+ }, SocketAddress::from_str("127.0.0.1:1234").unwrap());
- assert_eq!(NetAddress::IPv6 {
+ assert_eq!(SocketAddress::TcpIpV6 {
addr: Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).octets(),
port: 1234,
- }, NetAddress::from_str("[0:0:0:0:0:0:0:1]:1234").unwrap());
+ }, SocketAddress::from_str("[0:0:0:0:0:0:0:1]:1234").unwrap());
assert_eq!(
- NetAddress::Hostname {
+ SocketAddress::Hostname {
hostname: Hostname::try_from("lightning-node.mydomain.com".to_string()).unwrap(),
port: 1234,
- }, NetAddress::from_str("lightning-node.mydomain.com:1234").unwrap());
+ }, SocketAddress::from_str("lightning-node.mydomain.com:1234").unwrap());
assert_eq!(
- NetAddress::Hostname {
+ SocketAddress::Hostname {
hostname: Hostname::try_from("example.com".to_string()).unwrap(),
port: 1234,
- }, NetAddress::from_str("example.com:1234").unwrap());
- assert_eq!(NetAddress::OnionV3 {
+ }, SocketAddress::from_str("example.com:1234").unwrap());
+ assert_eq!(SocketAddress::OnionV3 {
ed25519_pubkey: [37, 24, 75, 5, 25, 73, 117, 194, 139, 102, 182, 107, 4, 105, 247, 246, 85,
111, 177, 172, 49, 137, 167, 155, 64, 221, 163, 47, 31, 33, 71, 3],
checksum: 48326,
version: 121,
port: 1234
- }, NetAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:1234").unwrap());
- assert_eq!(Err(NetAddressParseError::InvalidOnionV3), NetAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6.onion:1234"));
- assert_eq!(Err(NetAddressParseError::InvalidInput), NetAddress::from_str("127.0.0.1@1234"));
- assert_eq!(Err(NetAddressParseError::InvalidInput), "".parse::<NetAddress>());
- assert!(NetAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.onion:9735:94").is_err());
- assert!(NetAddress::from_str("wrong$%#.com:1234").is_err());
- assert_eq!(Err(NetAddressParseError::InvalidPort), NetAddress::from_str("example.com:wrong"));
- assert!("localhost".parse::<NetAddress>().is_err());
- assert!("localhost:invalid-port".parse::<NetAddress>().is_err());
- assert!( "invalid-onion-v3-hostname.onion:8080".parse::<NetAddress>().is_err());
- assert!("b32.example.onion:invalid-port".parse::<NetAddress>().is_err());
- assert!("invalid-address".parse::<NetAddress>().is_err());
- assert!(NetAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.onion:1234").is_err());
+ }, SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion:1234").unwrap());
+ assert_eq!(Err(SocketAddressParseError::InvalidOnionV3), SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6.onion:1234"));
+ assert_eq!(Err(SocketAddressParseError::InvalidInput), SocketAddress::from_str("127.0.0.1@1234"));
+ assert_eq!(Err(SocketAddressParseError::InvalidInput), "".parse::<SocketAddress>());
+ assert!(SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.onion:9735:94").is_err());
+ assert!(SocketAddress::from_str("wrong$%#.com:1234").is_err());
+ assert_eq!(Err(SocketAddressParseError::InvalidPort), SocketAddress::from_str("example.com:wrong"));
+ assert!("localhost".parse::<SocketAddress>().is_err());
+ assert!("localhost:invalid-port".parse::<SocketAddress>().is_err());
+ assert!( "invalid-onion-v3-hostname.onion:8080".parse::<SocketAddress>().is_err());
+ assert!("b32.example.onion:invalid-port".parse::<SocketAddress>().is_err());
+ assert!("invalid-address".parse::<SocketAddress>().is_err());
+ assert!(SocketAddress::from_str("pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion.onion:1234").is_err());
}
}
let short_channel_id = channels[1].0.contents.short_channel_id;
let amt_to_forward = nodes[1].node.per_peer_state.read().unwrap().get(&nodes[2].node.get_our_node_id())
.unwrap().lock().unwrap().channel_by_id.get(&channels[1].2).unwrap()
- .context.get_counterparty_htlc_minimum_msat() - 1;
+ .context().get_counterparty_htlc_minimum_msat() - 1;
let mut bogus_route = route.clone();
let route_len = bogus_route.paths[0].hops.len();
bogus_route.paths[0].hops[route_len-1].fee_msat = amt_to_forward;
use crate::sign::{EntropySource, NodeSigner, Recipient};
use crate::events::{self, PaymentFailureReason};
use crate::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
-use crate::ln::channelmanager::{ChannelDetails, EventCompletionAction, HTLCSource, IDEMPOTENCY_TIMEOUT_TICKS, PaymentId};
+use crate::ln::channelmanager::{ChannelDetails, EventCompletionAction, HTLCSource, PaymentId};
use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason};
+use crate::offers::invoice::Bolt12Invoice;
use crate::routing::router::{InFlightHtlcs, Path, PaymentParameters, Route, RouteParameters, Router};
use crate::util::errors::APIError;
use crate::util::logger::Logger;
use crate::prelude::*;
use crate::sync::Mutex;
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time-out the idempotency
+/// of payments by [`PaymentId`]. See [`OutboundPayments::remove_stale_payments`].
+///
+/// [`ChannelManager::timer_tick_occurred`]: crate::ln::channelmanager::ChannelManager::timer_tick_occurred
+pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
+
+/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until an invoice request without
+/// a response is timed out.
+///
+/// [`ChannelManager::timer_tick_occurred`]: crate::ln::channelmanager::ChannelManager::timer_tick_occurred
+const INVOICE_REQUEST_TIMEOUT_TICKS: u8 = 3;
+
/// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
/// and later, also stores information for retrying the payment.
pub(crate) enum PendingOutboundPayment {
Legacy {
session_privs: HashSet<[u8; 32]>,
},
+ AwaitingInvoice {
+ timer_ticks_without_response: u8,
+ retry_strategy: Retry,
+ },
+ InvoiceReceived {
+ payment_hash: PaymentHash,
+ retry_strategy: Retry,
+ },
Retryable {
retry_strategy: Option<Retry>,
attempts: PaymentAttempts,
params.previously_failed_channels.push(scid);
}
}
+ fn is_awaiting_invoice(&self) -> bool {
+ match self {
+ PendingOutboundPayment::AwaitingInvoice { .. } => true,
+ _ => false,
+ }
+ }
pub(super) fn is_fulfilled(&self) -> bool {
match self {
PendingOutboundPayment::Fulfilled { .. } => true,
fn payment_hash(&self) -> Option<PaymentHash> {
match self {
PendingOutboundPayment::Legacy { .. } => None,
+ PendingOutboundPayment::AwaitingInvoice { .. } => None,
+ PendingOutboundPayment::InvoiceReceived { payment_hash, .. } => Some(*payment_hash),
PendingOutboundPayment::Retryable { payment_hash, .. } => Some(*payment_hash),
PendingOutboundPayment::Fulfilled { payment_hash, .. } => *payment_hash,
PendingOutboundPayment::Abandoned { payment_hash, .. } => Some(*payment_hash),
PendingOutboundPayment::Legacy { session_privs } |
PendingOutboundPayment::Retryable { session_privs, .. } |
PendingOutboundPayment::Fulfilled { session_privs, .. } |
- PendingOutboundPayment::Abandoned { session_privs, .. }
- => session_privs,
+ PendingOutboundPayment::Abandoned { session_privs, .. } => session_privs,
+ PendingOutboundPayment::AwaitingInvoice { .. } |
+ PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); return; },
});
let payment_hash = self.payment_hash();
*self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash, timer_ticks_without_htlcs: 0 };
payment_hash: *payment_hash,
reason: Some(reason)
};
+ } else if let PendingOutboundPayment::InvoiceReceived { payment_hash, .. } = self {
+ *self = PendingOutboundPayment::Abandoned {
+ session_privs: HashSet::new(),
+ payment_hash: *payment_hash,
+ reason: Some(reason)
+ };
}
}
PendingOutboundPayment::Fulfilled { session_privs, .. } |
PendingOutboundPayment::Abandoned { session_privs, .. } => {
session_privs.remove(session_priv)
- }
+ },
+ PendingOutboundPayment::AwaitingInvoice { .. } |
+ PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); false },
};
if remove_res {
if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
PendingOutboundPayment::Legacy { session_privs } |
PendingOutboundPayment::Retryable { session_privs, .. } => {
session_privs.insert(session_priv)
- }
+ },
+ PendingOutboundPayment::AwaitingInvoice { .. } |
+ PendingOutboundPayment::InvoiceReceived { .. } => { debug_assert!(false); false },
PendingOutboundPayment::Fulfilled { .. } => false,
PendingOutboundPayment::Abandoned { .. } => false,
};
PendingOutboundPayment::Fulfilled { session_privs, .. } |
PendingOutboundPayment::Abandoned { session_privs, .. } => {
session_privs.len()
- }
+ },
+ PendingOutboundPayment::AwaitingInvoice { .. } => 0,
+ PendingOutboundPayment::InvoiceReceived { .. } => 0,
}
}
}
/// Each attempt may be multiple HTLCs along multiple paths if the router decides to split up a
/// retry, and may retry multiple failed HTLCs at once if they failed around the same time and
/// were retried along a route from a single call to [`Router::find_route_with_id`].
- Attempts(usize),
+ Attempts(u32),
#[cfg(not(feature = "no-std"))]
/// Time elapsed before abandoning retries for a payment. At least one attempt at payment is made;
/// see [`PaymentParameters::expiry_time`] to avoid any attempt at payment after a specific time.
Timeout(core::time::Duration),
}
+// (De)serialization for `Retry` so a payment's retry strategy can be persisted
+// (e.g. inside the `AwaitingInvoice`/`InvoiceReceived` TLV variants below).
+// Two cfg-gated invocations are required because the `Timeout` variant itself
+// only exists when the `no-std` feature is disabled (see its `#[cfg]` on the
+// enum), so its TLV mapping (type 2) can only be declared in that build.
+#[cfg(feature = "no-std")]
+impl_writeable_tlv_based_enum!(Retry,
+ ;
+ (0, Attempts)
+);
+
+// NOTE(review): `Attempts` keeps TLV type 0 in both builds, so values written
+// by a `no-std` build remain readable by a std build and vice versa.
+#[cfg(not(feature = "no-std"))]
+impl_writeable_tlv_based_enum!(Retry,
+ ;
+ (0, Attempts),
+ (2, Timeout)
+);
+
impl Retry {
pub(crate) fn is_retryable_now(&self, attempts: &PaymentAttempts) -> bool {
match (self, attempts) {
pub(crate) struct PaymentAttemptsUsingTime<T: Time> {
/// This count will be incremented only after the result of the attempt is known. When it's 0,
/// it means the result of the first attempt is not known yet.
- pub(crate) count: usize,
+ pub(crate) count: u32,
/// This field is only used when retry is `Retry::Timeout` which is only build with feature std
#[cfg(not(feature = "no-std"))]
first_attempted_at: T,
},
}
+/// An error when attempting to pay a BOLT 12 invoice.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(super) enum Bolt12PaymentError {
+ /// The invoice was not requested.
+ ///
+ /// Returned by `send_payment_for_bolt12_invoice` when no pending payment entry exists for
+ /// the given `PaymentId` (i.e. `add_new_awaiting_invoice` was never called for it).
+ UnexpectedInvoice,
+ /// Payment for an invoice with the corresponding [`PaymentId`] was already initiated.
+ ///
+ /// Returned when an entry for the `PaymentId` exists but is in any state other than
+ /// `AwaitingInvoice`, meaning an invoice was already handed to the payment pipeline.
+ DuplicateInvoice,
+}
+
/// Information which is provided, encrypted, to the payment recipient when sending HTLCs.
///
/// This should generally be constructed with data communicated to us from the recipient (via a
}
}
+ /// Initiates payment of the given [`Bolt12Invoice`].
+ ///
+ /// The entry for `payment_id` must currently be `AwaitingInvoice`; it is transitioned to
+ /// `InvoiceReceived` (preserving the original retry strategy) before routing begins.
+ /// Returns `Err(Bolt12PaymentError::UnexpectedInvoice)` if no entry exists for `payment_id`,
+ /// and `Err(Bolt12PaymentError::DuplicateInvoice)` if the entry is in any other state.
+ ///
+ /// Note: route-finding and send failures do NOT surface through the returned `Result` —
+ /// `find_route_and_send_payment` returns `()` and reports failures via `pending_events`,
+ /// so this function returns `Ok(())` once the hand-off has happened.
+ #[allow(unused)]
+ pub(super) fn send_payment_for_bolt12_invoice<R: Deref, ES: Deref, NS: Deref, IH, SP, L: Deref>(
+ &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R,
+ first_hops: Vec<ChannelDetails>, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
+ best_block_height: u32, logger: &L,
+ pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
+ send_payment_along_path: SP,
+ ) -> Result<(), Bolt12PaymentError>
+ where
+ R::Target: Router,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ L::Target: Logger,
+ IH: Fn() -> InFlightHtlcs,
+ SP: Fn(SendAlongPathArgs) -> Result<(), APIError>,
+ {
+ let payment_hash = invoice.payment_hash();
+ // State transition is done under the pending-payments lock so a concurrent caller with the
+ // same invoice observes `InvoiceReceived` and gets `DuplicateInvoice`.
+ match self.pending_outbound_payments.lock().unwrap().entry(payment_id) {
+ hash_map::Entry::Occupied(entry) => match entry.get() {
+ PendingOutboundPayment::AwaitingInvoice { retry_strategy, .. } => {
+ *entry.into_mut() = PendingOutboundPayment::InvoiceReceived {
+ payment_hash,
+ retry_strategy: *retry_strategy,
+ };
+ },
+ _ => return Err(Bolt12PaymentError::DuplicateInvoice),
+ },
+ hash_map::Entry::Vacant(_) => return Err(Bolt12PaymentError::UnexpectedInvoice),
+ };
+
+ // Amount and routing constraints come entirely from the invoice itself.
+ let route_params = RouteParameters {
+ payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+ final_value_msat: invoice.amount_msats(),
+ };
+
+ self.find_route_and_send_payment(
+ payment_hash, payment_id, route_params, router, first_hops, &inflight_htlcs,
+ entropy_source, node_signer, best_block_height, logger, pending_events,
+ &send_payment_along_path
+ );
+
+ Ok(())
+ }
+
pub(super) fn check_retry_payments<R: Deref, ES: Deref, NS: Deref, SP, IH, FH, L: Deref>(
&self, router: &R, first_hops: FH, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
best_block_height: u32,
}
core::mem::drop(outbounds);
if let Some((payment_hash, payment_id, route_params)) = retry_id_route_params {
- self.retry_payment_internal(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path)
+ self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops(), &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path)
} else { break }
}
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
outbounds.retain(|pmt_id, pmt| {
let mut retain = true;
- if !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 {
+ if !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_awaiting_invoice() {
pmt.mark_abandoned(PaymentFailureReason::RetriesExhausted);
if let PendingOutboundPayment::Abandoned { payment_hash, reason, .. } = pmt {
pending_events.lock().unwrap().push_back((events::Event::PaymentFailed {
pub(super) fn needs_abandon(&self) -> bool {
let outbounds = self.pending_outbound_payments.lock().unwrap();
outbounds.iter().any(|(_, pmt)|
- !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_fulfilled())
+ !pmt.is_auto_retryable_now() && pmt.remaining_parts() == 0 && !pmt.is_fulfilled() &&
+ !pmt.is_awaiting_invoice())
}
/// Errors immediately on [`RetryableSendFailure`] error conditions. Otherwise, further errors may
Ok(())
}
- fn retry_payment_internal<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
+ fn find_route_and_send_payment<R: Deref, NS: Deref, ES: Deref, IH, SP, L: Deref>(
&self, payment_hash: PaymentHash, payment_id: PaymentId, route_params: RouteParameters,
router: &R, first_hops: Vec<ChannelDetails>, inflight_htlcs: &IH, entropy_source: &ES,
node_signer: &NS, best_block_height: u32, logger: &L,
}
}
- const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
- let mut onion_session_privs = Vec::with_capacity(route.paths.len());
- for _ in 0..route.paths.len() {
- onion_session_privs.push(entropy_source.get_secure_random_bytes());
- }
-
macro_rules! abandon_with_entry {
($payment: expr, $reason: expr) => {
$payment.get_mut().mark_abandoned($reason);
}
}
}
- let (total_msat, recipient_onion, keysend_preimage) = {
+ let (total_msat, recipient_onion, keysend_preimage, onion_session_privs) = {
let mut outbounds = self.pending_outbound_payments.lock().unwrap();
match outbounds.entry(payment_id) {
hash_map::Entry::Occupied(mut payment) => {
- let res = match payment.get() {
+ match payment.get() {
PendingOutboundPayment::Retryable {
total_msat, keysend_preimage, payment_secret, payment_metadata,
custom_tlvs, pending_amt_msat, ..
} => {
+ const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
let retry_amt_msat = route.get_total_amount();
if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
log_error!(logger, "retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat);
abandon_with_entry!(payment, PaymentFailureReason::UnexpectedError);
return
}
- (*total_msat, RecipientOnionFields {
- payment_secret: *payment_secret,
- payment_metadata: payment_metadata.clone(),
- custom_tlvs: custom_tlvs.clone(),
- }, *keysend_preimage)
+
+ if !payment.get().is_retryable_now() {
+ log_error!(logger, "Retries exhausted for payment id {}", &payment_id);
+ abandon_with_entry!(payment, PaymentFailureReason::RetriesExhausted);
+ return
+ }
+
+ let total_msat = *total_msat;
+ let recipient_onion = RecipientOnionFields {
+ payment_secret: *payment_secret,
+ payment_metadata: payment_metadata.clone(),
+ custom_tlvs: custom_tlvs.clone(),
+ };
+ let keysend_preimage = *keysend_preimage;
+
+ let mut onion_session_privs = Vec::with_capacity(route.paths.len());
+ for _ in 0..route.paths.len() {
+ onion_session_privs.push(entropy_source.get_secure_random_bytes());
+ }
+
+ for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+ assert!(payment.get_mut().insert(*session_priv_bytes, path));
+ }
+
+ payment.get_mut().increment_attempts();
+
+ (total_msat, recipient_onion, keysend_preimage, onion_session_privs)
},
PendingOutboundPayment::Legacy { .. } => {
log_error!(logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102");
return
},
+ PendingOutboundPayment::AwaitingInvoice { .. } => {
+ log_error!(logger, "Payment not yet sent");
+ return
+ },
+ PendingOutboundPayment::InvoiceReceived { payment_hash, retry_strategy } => {
+ let total_amount = route_params.final_value_msat;
+ let recipient_onion = RecipientOnionFields {
+ payment_secret: None,
+ payment_metadata: None,
+ custom_tlvs: vec![],
+ };
+ let retry_strategy = Some(*retry_strategy);
+ let payment_params = Some(route_params.payment_params.clone());
+ let (retryable_payment, onion_session_privs) = self.create_pending_payment(
+ *payment_hash, recipient_onion.clone(), None, &route,
+ retry_strategy, payment_params, entropy_source, best_block_height
+ );
+ *payment.into_mut() = retryable_payment;
+ (total_amount, recipient_onion, None, onion_session_privs)
+ },
PendingOutboundPayment::Fulfilled { .. } => {
log_error!(logger, "Payment already completed");
return
log_error!(logger, "Payment already abandoned (with some HTLCs still pending)");
return
},
- };
- if !payment.get().is_retryable_now() {
- log_error!(logger, "Retries exhausted for payment id {}", &payment_id);
- abandon_with_entry!(payment, PaymentFailureReason::RetriesExhausted);
- return
- }
- payment.get_mut().increment_attempts();
- for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
- assert!(payment.get_mut().insert(*session_priv_bytes, path));
}
- res
},
hash_map::Entry::Vacant(_) => {
log_error!(logger, "Payment with ID {} not found", &payment_id);
match err {
PaymentSendFailure::AllFailedResendSafe(errs) => {
Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), logger, pending_events);
- self.retry_payment_internal(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+ self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
},
PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => {
Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), logger, pending_events);
// Some paths were sent, even if we failed to send the full MPP value our recipient may
// misbehave and claim the funds, at which point we have to consider the payment sent, so
// return `Ok()` here, ignoring any retry errors.
- self.retry_payment_internal(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
+ self.find_route_and_send_payment(payment_hash, payment_id, retry, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path);
},
PaymentSendFailure::PartialFailure { failed_paths_retry: None, .. } => {
// This may happen if we send a payment and some paths fail, but only due to a temporary
keysend_preimage: Option<PaymentPreimage>, route: &Route, retry_strategy: Option<Retry>,
payment_params: Option<PaymentParameters>, entropy_source: &ES, best_block_height: u32
) -> Result<Vec<[u8; 32]>, PaymentSendFailure> where ES::Target: EntropySource {
+ let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
+ match pending_outbounds.entry(payment_id) {
+ hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
+ hash_map::Entry::Vacant(entry) => {
+ let (payment, onion_session_privs) = self.create_pending_payment(
+ payment_hash, recipient_onion, keysend_preimage, route, retry_strategy,
+ payment_params, entropy_source, best_block_height
+ );
+ entry.insert(payment);
+ Ok(onion_session_privs)
+ },
+ }
+ }
+
+ /// Builds a fresh `PendingOutboundPayment::Retryable` for the given route, along with one
+ /// newly-generated 32-byte onion session key per path.
+ ///
+ /// Each `(session_priv, path)` pair is inserted into the payment before returning; the
+ /// `assert!` enforces that every freshly-drawn session key is unique within the payment.
+ /// The caller is responsible for storing the returned payment in the pending-payments map.
+ fn create_pending_payment<ES: Deref>(
+ &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
+ keysend_preimage: Option<PaymentPreimage>, route: &Route, retry_strategy: Option<Retry>,
+ payment_params: Option<PaymentParameters>, entropy_source: &ES, best_block_height: u32
+ ) -> (PendingOutboundPayment, Vec<[u8; 32]>)
+ where
+ ES::Target: EntropySource,
+ {
let mut onion_session_privs = Vec::with_capacity(route.paths.len());
for _ in 0..route.paths.len() {
onion_session_privs.push(entropy_source.get_secure_random_bytes());
}
+ // Pending amounts/fees start at zero; inserting the per-path session keys below is what
+ // accounts the route into the payment's pending totals (see `insert` on this enum).
+ let mut payment = PendingOutboundPayment::Retryable {
+ retry_strategy,
+ attempts: PaymentAttempts::new(),
+ payment_params,
+ session_privs: HashSet::new(),
+ pending_amt_msat: 0,
+ pending_fee_msat: Some(0),
+ payment_hash,
+ payment_secret: recipient_onion.payment_secret,
+ payment_metadata: recipient_onion.payment_metadata,
+ keysend_preimage,
+ custom_tlvs: recipient_onion.custom_tlvs,
+ starting_block_height: best_block_height,
+ total_msat: route.get_total_amount(),
+ };
+
+ for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
+ assert!(payment.insert(*session_priv_bytes, path));
+ }
+
+ (payment, onion_session_privs)
+ }
+
+ /// Registers a new pending payment in the `AwaitingInvoice` state, keyed by `payment_id`.
+ ///
+ /// Returns `Err(())` if an entry (in any state) already exists for `payment_id`, preserving
+ /// idempotency. `timer_ticks_without_response` starts at 0 and is aged by
+ /// `remove_stale_payments`, which abandons the entry once it exceeds
+ /// `INVOICE_REQUEST_TIMEOUT_TICKS` without an invoice arriving.
+ #[allow(unused)]
+ pub(super) fn add_new_awaiting_invoice(
+ &self, payment_id: PaymentId, retry_strategy: Retry
+ ) -> Result<(), ()> {
let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
match pending_outbounds.entry(payment_id) {
- hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment),
+ hash_map::Entry::Occupied(_) => Err(()),
hash_map::Entry::Vacant(entry) => {
- let payment = entry.insert(PendingOutboundPayment::Retryable {
+ // No route exists yet, so only the retry strategy and a fresh timeout counter
+ // are recorded; everything else is filled in once the invoice is received.
+ entry.insert(PendingOutboundPayment::AwaitingInvoice {
+ timer_ticks_without_response: 0,
retry_strategy,
- attempts: PaymentAttempts::new(),
- payment_params,
- session_privs: HashSet::new(),
- pending_amt_msat: 0,
- pending_fee_msat: Some(0),
- payment_hash,
- payment_secret: recipient_onion.payment_secret,
- payment_metadata: recipient_onion.payment_metadata,
- keysend_preimage,
- custom_tlvs: recipient_onion.custom_tlvs,
- starting_block_height: best_block_height,
- total_msat: route.get_total_amount(),
});
- for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) {
- assert!(payment.insert(*session_priv_bytes, path));
- }
-
- Ok(onion_session_privs)
+ Ok(())
},
}
}
}
}
- pub(super) fn remove_stale_resolved_payments(&self,
- pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>)
+ pub(super) fn remove_stale_payments(
+ &self, pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>)
{
- // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
- // from the map. However, if we did that immediately when the last payment HTLC is claimed,
- // this could race the user making a duplicate send_payment call and our idempotency
- // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
- // removal. This should be more than sufficient to ensure the idempotency of any
- // `send_payment` calls that were made at the same time the `PaymentSent` event was being
- // processed.
let mut pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
- let pending_events = pending_events.lock().unwrap();
+ let mut pending_events = pending_events.lock().unwrap();
pending_outbound_payments.retain(|payment_id, payment| {
+ // If an outbound payment was completed, and no pending HTLCs remain, we should remove it
+ // from the map. However, if we did that immediately when the last payment HTLC is claimed,
+ // this could race the user making a duplicate send_payment call and our idempotency
+ // guarantees would be violated. Instead, we wait a few timer ticks to do the actual
+ // removal. This should be more than sufficient to ensure the idempotency of any
+ // `send_payment` calls that were made at the same time the `PaymentSent` event was being
+ // processed.
if let PendingOutboundPayment::Fulfilled { session_privs, timer_ticks_without_htlcs, .. } = payment {
let mut no_remaining_entries = session_privs.is_empty();
if no_remaining_entries {
*timer_ticks_without_htlcs = 0;
true
}
+ } else if let PendingOutboundPayment::AwaitingInvoice { timer_ticks_without_response, .. } = payment {
+ *timer_ticks_without_response += 1;
+ if *timer_ticks_without_response <= INVOICE_REQUEST_TIMEOUT_TICKS {
+ true
+ } else {
+ pending_events.push_back(
+ (events::Event::InvoiceRequestFailed { payment_id: *payment_id }, None)
+ );
+ false
+ }
} else { true }
});
}
}, None));
payment.remove();
}
+ } else if let PendingOutboundPayment::AwaitingInvoice { .. } = payment.get() {
+ pending_events.lock().unwrap().push_back((events::Event::InvoiceRequestFailed {
+ payment_id,
+ }, None));
+ payment.remove();
}
}
}
(1, reason, option),
(2, payment_hash, required),
},
+ (5, AwaitingInvoice) => {
+ (0, timer_ticks_without_response, required),
+ (2, retry_strategy, required),
+ },
+ (7, InvoiceReceived) => {
+ (0, payment_hash, required),
+ (2, retry_strategy, required),
+ },
);
#[cfg(test)]
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
use crate::ln::features::{ChannelFeatures, NodeFeatures};
use crate::ln::msgs::{ErrorAction, LightningError};
- use crate::ln::outbound_payment::{OutboundPayments, Retry, RetryableSendFailure};
+ use crate::ln::outbound_payment::{Bolt12PaymentError, INVOICE_REQUEST_TIMEOUT_TICKS, OutboundPayments, Retry, RetryableSendFailure};
+ use crate::offers::invoice::DEFAULT_RELATIVE_EXPIRY;
+ use crate::offers::offer::OfferBuilder;
+ use crate::offers::test_utils::*;
use crate::routing::gossip::NetworkGraph;
use crate::routing::router::{InFlightHtlcs, Path, PaymentParameters, Route, RouteHop, RouteParameters};
use crate::sync::{Arc, Mutex, RwLock};
PaymentId([0; 32]), None, &Route { paths: vec![], route_params: None },
Some(Retry::Attempts(1)), Some(expired_route_params.payment_params.clone()),
&&keys_manager, 0).unwrap();
- outbound_payments.retry_payment_internal(
+ outbound_payments.find_route_and_send_payment(
PaymentHash([0; 32]), PaymentId([0; 32]), expired_route_params, &&router, vec![],
&|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger, &pending_events,
&|_| Ok(()));
PaymentId([0; 32]), None, &Route { paths: vec![], route_params: None },
Some(Retry::Attempts(1)), Some(route_params.payment_params.clone()),
&&keys_manager, 0).unwrap();
- outbound_payments.retry_payment_internal(
+ outbound_payments.find_route_and_send_payment(
PaymentHash([0; 32]), PaymentId([0; 32]), route_params, &&router, vec![],
&|| InFlightHtlcs::new(), &&keys_manager, &&keys_manager, 0, &&logger, &pending_events,
&|_| Ok(()));
} else { panic!("Unexpected event"); }
if let Event::PaymentFailed { .. } = events[1].0 { } else { panic!("Unexpected event"); }
}
+
+ // An `AwaitingInvoice` entry must survive exactly INVOICE_REQUEST_TIMEOUT_TICKS calls to
+ // `remove_stale_payments`; the next call removes it and emits `InvoiceRequestFailed`.
+ // Afterwards the same `PaymentId` is reusable, but re-registering a live id errors.
+ #[test]
+ fn removes_stale_awaiting_invoice() {
+ let pending_events = Mutex::new(VecDeque::new());
+ let outbound_payments = OutboundPayments::new();
+ let payment_id = PaymentId([0; 32]);
+
+ assert!(!outbound_payments.has_pending_payments());
+ assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+ assert!(outbound_payments.has_pending_payments());
+
+ for _ in 0..INVOICE_REQUEST_TIMEOUT_TICKS {
+ outbound_payments.remove_stale_payments(&pending_events);
+ assert!(outbound_payments.has_pending_payments());
+ assert!(pending_events.lock().unwrap().is_empty());
+ }
+
+ // One tick past the timeout: entry removed, failure event queued.
+ outbound_payments.remove_stale_payments(&pending_events);
+ assert!(!outbound_payments.has_pending_payments());
+ assert!(!pending_events.lock().unwrap().is_empty());
+ assert_eq!(
+ pending_events.lock().unwrap().pop_front(),
+ Some((Event::InvoiceRequestFailed { payment_id }, None)),
+ );
+ assert!(pending_events.lock().unwrap().is_empty());
+
+ assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+ assert!(outbound_payments.has_pending_payments());
+
+ assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_err());
+ }
+
+ // Abandoning an `AwaitingInvoice` payment removes it immediately (no timeout needed) and
+ // surfaces an `InvoiceRequestFailed` event rather than a `PaymentFailed` one.
+ #[test]
+ fn removes_abandoned_awaiting_invoice() {
+ let pending_events = Mutex::new(VecDeque::new());
+ let outbound_payments = OutboundPayments::new();
+ let payment_id = PaymentId([0; 32]);
+
+ assert!(!outbound_payments.has_pending_payments());
+ assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+ assert!(outbound_payments.has_pending_payments());
+
+ outbound_payments.abandon_payment(
+ payment_id, PaymentFailureReason::UserAbandoned, &pending_events
+ );
+ assert!(!outbound_payments.has_pending_payments());
+ assert!(!pending_events.lock().unwrap().is_empty());
+ assert_eq!(
+ pending_events.lock().unwrap().pop_front(),
+ Some((Event::InvoiceRequestFailed { payment_id }, None)),
+ );
+ assert!(pending_events.lock().unwrap().is_empty());
+ }
+
+ // Paying an invoice created DEFAULT_RELATIVE_EXPIRY in the past returns Ok(()) (hand-off
+ // succeeded) but the payment is abandoned with `PaymentFailed { reason: PaymentExpired }`.
+ // Gated on `std` — presumably the expiry check needs wall-clock time via `now()`; the
+ // `|_| panic!()` send closure proves no HTLC is ever attempted.
+ #[cfg(feature = "std")]
+ #[test]
+ fn fails_sending_payment_for_expired_bolt12_invoice() {
+ let logger = test_utils::TestLogger::new();
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+ let scorer = RwLock::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+ let pending_events = Mutex::new(VecDeque::new());
+ let outbound_payments = OutboundPayments::new();
+ let payment_id = PaymentId([0; 32]);
+
+ assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+ assert!(outbound_payments.has_pending_payments());
+
+ // Backdate creation so the invoice is already past its relative expiry.
+ let created_at = now() - DEFAULT_RELATIVE_EXPIRY;
+ let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .build().unwrap()
+ .sign(payer_sign).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), created_at).unwrap()
+ .build().unwrap()
+ .sign(recipient_sign).unwrap();
+
+ assert_eq!(
+ outbound_payments.send_payment_for_bolt12_invoice(
+ &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+ &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ ),
+ Ok(()),
+ );
+ assert!(!outbound_payments.has_pending_payments());
+
+ let payment_hash = invoice.payment_hash();
+ let reason = Some(PaymentFailureReason::PaymentExpired);
+
+ assert!(!pending_events.lock().unwrap().is_empty());
+ assert_eq!(
+ pending_events.lock().unwrap().pop_front(),
+ Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+ );
+ assert!(pending_events.lock().unwrap().is_empty());
+ }
+
+ // If the router cannot find a route for the invoice, `send_payment_for_bolt12_invoice`
+ // still returns Ok(()) but the payment is removed and a `PaymentFailed` event with
+ // `RouteNotFound` is queued; the `|_| panic!()` closure proves nothing was sent.
+ #[test]
+ fn fails_finding_route_for_bolt12_invoice() {
+ let logger = test_utils::TestLogger::new();
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+ let scorer = RwLock::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+ let pending_events = Mutex::new(VecDeque::new());
+ let outbound_payments = OutboundPayments::new();
+ let payment_id = PaymentId([0; 32]);
+
+ assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+ assert!(outbound_payments.has_pending_payments());
+
+ let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .build().unwrap()
+ .sign(payer_sign).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+ .build().unwrap()
+ .sign(recipient_sign).unwrap();
+
+ // Pin the exact RouteParameters the payment code must derive from the invoice.
+ router.expect_find_route(
+ RouteParameters {
+ payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+ final_value_msat: invoice.amount_msats(),
+ },
+ Err(LightningError { err: String::new(), action: ErrorAction::IgnoreError }),
+ );
+
+ assert_eq!(
+ outbound_payments.send_payment_for_bolt12_invoice(
+ &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+ &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ ),
+ Ok(()),
+ );
+ assert!(!outbound_payments.has_pending_payments());
+
+ let payment_hash = invoice.payment_hash();
+ let reason = Some(PaymentFailureReason::RouteNotFound);
+
+ assert!(!pending_events.lock().unwrap().is_empty());
+ assert_eq!(
+ pending_events.lock().unwrap().pop_front(),
+ Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+ );
+ assert!(pending_events.lock().unwrap().is_empty());
+ }
+
+ // The router "succeeds" but hands back a Route with no paths, so the actual send step
+ // fails: the payment is removed and `PaymentFailed { reason: UnexpectedError }` is queued.
+ #[test]
+ fn fails_paying_for_bolt12_invoice() {
+ let logger = test_utils::TestLogger::new();
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+ let scorer = RwLock::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+ let pending_events = Mutex::new(VecDeque::new());
+ let outbound_payments = OutboundPayments::new();
+ let payment_id = PaymentId([0; 32]);
+
+ assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+ assert!(outbound_payments.has_pending_payments());
+
+ let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .build().unwrap()
+ .sign(payer_sign).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+ .build().unwrap()
+ .sign(recipient_sign).unwrap();
+
+ let route_params = RouteParameters {
+ payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+ final_value_msat: invoice.amount_msats(),
+ };
+ // Route with zero paths — routing "works" but the payment cannot be sent along it.
+ router.expect_find_route(
+ route_params.clone(), Ok(Route { paths: vec![], route_params: Some(route_params) })
+ );
+
+ assert_eq!(
+ outbound_payments.send_payment_for_bolt12_invoice(
+ &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+ &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ ),
+ Ok(()),
+ );
+ assert!(!outbound_payments.has_pending_payments());
+
+ let payment_hash = invoice.payment_hash();
+ let reason = Some(PaymentFailureReason::UnexpectedError);
+
+ assert!(!pending_events.lock().unwrap().is_empty());
+ assert_eq!(
+ pending_events.lock().unwrap().pop_front(),
+ Some((Event::PaymentFailed { payment_id, payment_hash, reason }, None)),
+ );
+ assert!(pending_events.lock().unwrap().is_empty());
+ }
+
+ // Happy path plus both error variants: sending before `add_new_awaiting_invoice` yields
+ // `UnexpectedInvoice`; a registered payment sends successfully (single-hop route, send
+ // closure returns Ok) and stays pending; sending the same invoice again while the payment
+ // is in flight yields `DuplicateInvoice`.
+ #[test]
+ fn sends_payment_for_bolt12_invoice() {
+ let logger = test_utils::TestLogger::new();
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
+ let scorer = RwLock::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(network_graph, &scorer);
+ let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
+
+ let pending_events = Mutex::new(VecDeque::new());
+ let outbound_payments = OutboundPayments::new();
+ let payment_id = PaymentId([0; 32]);
+
+ let invoice = OfferBuilder::new("foo".into(), recipient_pubkey())
+ .amount_msats(1000)
+ .build().unwrap()
+ .request_invoice(vec![1; 32], payer_pubkey()).unwrap()
+ .build().unwrap()
+ .sign(payer_sign).unwrap()
+ .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap()
+ .build().unwrap()
+ .sign(recipient_sign).unwrap();
+
+ let route_params = RouteParameters {
+ payment_params: PaymentParameters::from_bolt12_invoice(&invoice),
+ final_value_msat: invoice.amount_msats(),
+ };
+ // One direct hop carrying the full invoice amount.
+ router.expect_find_route(
+ route_params.clone(),
+ Ok(Route {
+ paths: vec![
+ Path {
+ hops: vec![
+ RouteHop {
+ pubkey: recipient_pubkey(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: 42,
+ channel_features: ChannelFeatures::empty(),
+ fee_msat: invoice.amount_msats(),
+ cltv_expiry_delta: 0,
+ }
+ ],
+ blinded_tail: None,
+ }
+ ],
+ route_params: Some(route_params),
+ })
+ );
+
+ // Invoice arriving without a prior invoice request must be rejected.
+ assert!(!outbound_payments.has_pending_payments());
+ assert_eq!(
+ outbound_payments.send_payment_for_bolt12_invoice(
+ &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+ &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ ),
+ Err(Bolt12PaymentError::UnexpectedInvoice),
+ );
+ assert!(!outbound_payments.has_pending_payments());
+ assert!(pending_events.lock().unwrap().is_empty());
+
+ assert!(outbound_payments.add_new_awaiting_invoice(payment_id, Retry::Attempts(0)).is_ok());
+ assert!(outbound_payments.has_pending_payments());
+
+ assert_eq!(
+ outbound_payments.send_payment_for_bolt12_invoice(
+ &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+ &&keys_manager, 0, &&logger, &pending_events, |_| Ok(())
+ ),
+ Ok(()),
+ );
+ assert!(outbound_payments.has_pending_payments());
+ assert!(pending_events.lock().unwrap().is_empty());
+
+ // Second delivery of the same invoice while the payment is in flight.
+ assert_eq!(
+ outbound_payments.send_payment_for_bolt12_invoice(
+ &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
+ &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ ),
+ Err(Bolt12PaymentError::DuplicateInvoice),
+ );
+ assert!(outbound_payments.has_pending_payments());
+ assert!(pending_events.lock().unwrap().is_empty());
+ }
}
use crate::chain::transaction::OutPoint;
use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose};
use crate::ln::channel::EXPIRE_PREV_CONFIG_TICKS;
-use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, IDEMPOTENCY_TIMEOUT_TICKS, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
+use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo};
use crate::ln::features::Bolt11InvoiceFeatures;
use crate::ln::{msgs, ChannelId, PaymentSecret, PaymentPreimage};
use crate::ln::msgs::ChannelMessageHandler;
-use crate::ln::outbound_payment::Retry;
+use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry};
use crate::routing::gossip::{EffectiveCapacity, RoutingFees};
use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters, find_route};
use crate::routing::scoring::ChannelUsage;
let mut peer_state = per_peer_state.get(&nodes[2].node.get_our_node_id())
.unwrap().lock().unwrap();
let mut channel = peer_state.channel_by_id.get_mut(&chan_id_2).unwrap();
- let mut new_config = channel.context.config();
+ let mut new_config = channel.context().config();
new_config.forwarding_fee_base_msat += 100_000;
- channel.context.update_config(&new_config);
+ channel.context_mut().update_config(&new_config);
new_route.paths[0].hops[0].fee_msat += 100_000;
}
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
- channel_1.context.get_short_channel_id().unwrap()
+ channel_1.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_1_used_liquidity, None);
}
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
- channel_2.context.get_short_channel_id().unwrap()
+ channel_2.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_2_used_liquidity, None);
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
- channel_1.context.get_short_channel_id().unwrap()
+ channel_1.context().get_short_channel_id().unwrap()
);
// First hop accounts for expected 1000 msat fee
assert_eq!(chan_1_used_liquidity, Some(501000));
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
- channel_2.context.get_short_channel_id().unwrap()
+ channel_2.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_2_used_liquidity, Some(500000));
let chan_1_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
- channel_1.context.get_short_channel_id().unwrap()
+ channel_1.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_1_used_liquidity, None);
}
let chan_2_used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[2].node.get_our_node_id()),
- channel_2.context.get_short_channel_id().unwrap()
+ channel_2.context().get_short_channel_id().unwrap()
);
assert_eq!(chan_2_used_liquidity, None);
}
let used_liquidity = inflight_htlcs.used_liquidity_msat(
&NodeId::from_pubkey(&nodes[0].node.get_our_node_id()) ,
&NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
- channel.context.get_short_channel_id().unwrap()
+ channel.context().get_short_channel_id().unwrap()
);
assert_eq!(used_liquidity, Some(2000000));
// Check for unknown channel id error.
let unknown_chan_id_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &ChannelId::from_bytes([42; 32]), nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
assert_eq!(unknown_chan_id_err , APIError::ChannelUnavailable {
- err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
+ err: format!("Channel with id {} not found for the passed counterparty node_id {}.",
log_bytes!([42; 32]), nodes[2].node.get_our_node_id()) });
if test == InterceptTest::Fail {
let temp_chan_id = nodes[1].node.create_channel(nodes[2].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
let unusable_chan_err = nodes[1].node.forward_intercepted_htlc(intercept_id, &temp_chan_id, nodes[2].node.get_our_node_id(), expected_outbound_amount_msat).unwrap_err();
assert_eq!(unusable_chan_err , APIError::ChannelUnavailable {
- err: format!("Funded channel with id {} not found for the passed counterparty node_id {}. Channel may still be opening.",
- &temp_chan_id, nodes[2].node.get_our_node_id()) });
+ err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
+ temp_chan_id, nodes[2].node.get_our_node_id()) });
assert_eq!(nodes[1].node.get_and_clear_pending_msg_events().len(), 1);
// Open the just-in-time channel so the payment can then be forwarded.
use crate::ln::ChannelId;
use crate::ln::features::{InitFeatures, NodeFeatures};
use crate::ln::msgs;
-use crate::ln::msgs::{ChannelMessageHandler, LightningError, NetAddress, OnionMessageHandler, RoutingMessageHandler};
+use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler};
use crate::ln::channelmanager::{SimpleArcChannelManager, SimpleRefChannelManager};
use crate::util::ser::{VecWriter, Writeable, Writer};
use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
/// handshake and can talk to this peer normally (though use [`Peer::handshake_complete`] to
/// check this).
their_features: Option<InitFeatures>,
- their_net_address: Option<NetAddress>,
+ their_socket_address: Option<SocketAddress>,
pending_outbound_buffer: LinkedList<Vec<u8>>,
pending_outbound_buffer_first_msg_offset: usize,
/// A function used to filter out local or private addresses
/// <https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xhtml>
/// <https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml>
-fn filter_addresses(ip_address: Option<NetAddress>) -> Option<NetAddress> {
+fn filter_addresses(ip_address: Option<SocketAddress>) -> Option<SocketAddress> {
match ip_address{
// For IPv4 range 10.0.0.0 - 10.255.255.255 (10/8)
- Some(NetAddress::IPv4{addr: [10, _, _, _], port: _}) => None,
+ Some(SocketAddress::TcpIpV4{addr: [10, _, _, _], port: _}) => None,
// For IPv4 range 0.0.0.0 - 0.255.255.255 (0/8)
- Some(NetAddress::IPv4{addr: [0, _, _, _], port: _}) => None,
+ Some(SocketAddress::TcpIpV4{addr: [0, _, _, _], port: _}) => None,
// For IPv4 range 100.64.0.0 - 100.127.255.255 (100.64/10)
- Some(NetAddress::IPv4{addr: [100, 64..=127, _, _], port: _}) => None,
+ Some(SocketAddress::TcpIpV4{addr: [100, 64..=127, _, _], port: _}) => None,
// For IPv4 range 127.0.0.0 - 127.255.255.255 (127/8)
- Some(NetAddress::IPv4{addr: [127, _, _, _], port: _}) => None,
+ Some(SocketAddress::TcpIpV4{addr: [127, _, _, _], port: _}) => None,
// For IPv4 range 169.254.0.0 - 169.254.255.255 (169.254/16)
- Some(NetAddress::IPv4{addr: [169, 254, _, _], port: _}) => None,
+ Some(SocketAddress::TcpIpV4{addr: [169, 254, _, _], port: _}) => None,
// For IPv4 range 172.16.0.0 - 172.31.255.255 (172.16/12)
- Some(NetAddress::IPv4{addr: [172, 16..=31, _, _], port: _}) => None,
+ Some(SocketAddress::TcpIpV4{addr: [172, 16..=31, _, _], port: _}) => None,
// For IPv4 range 192.168.0.0 - 192.168.255.255 (192.168/16)
- Some(NetAddress::IPv4{addr: [192, 168, _, _], port: _}) => None,
+ Some(SocketAddress::TcpIpV4{addr: [192, 168, _, _], port: _}) => None,
// For IPv4 range 192.88.99.0 - 192.88.99.255 (192.88.99/24)
- Some(NetAddress::IPv4{addr: [192, 88, 99, _], port: _}) => None,
+ Some(SocketAddress::TcpIpV4{addr: [192, 88, 99, _], port: _}) => None,
// For IPv6 range 2000:0000:0000:0000:0000:0000:0000:0000 - 3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff (2000::/3)
- Some(NetAddress::IPv6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
+ Some(SocketAddress::TcpIpV6{addr: [0x20..=0x3F, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _], port: _}) => ip_address,
// For remaining addresses
- Some(NetAddress::IPv6{addr: _, port: _}) => None,
+ Some(SocketAddress::TcpIpV6{addr: _, port: _}) => None,
Some(..) => ip_address,
None => None,
}
///
/// The returned `Option`s will only be `Some` if an address had been previously given via
/// [`Self::new_outbound_connection`] or [`Self::new_inbound_connection`].
- pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<NetAddress>)> {
+ pub fn get_peer_node_ids(&self) -> Vec<(PublicKey, Option<SocketAddress>)> {
let peers = self.peers.read().unwrap();
peers.values().filter_map(|peer_mutex| {
let p = peer_mutex.lock().unwrap();
if !p.handshake_complete() {
return None;
}
- Some((p.their_node_id.unwrap().0, p.their_net_address.clone()))
+ Some((p.their_node_id.unwrap().0, p.their_socket_address.clone()))
}).collect()
}
/// [`socket_disconnected`].
///
/// [`socket_disconnected`]: PeerManager::socket_disconnected
- pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<Vec<u8>, PeerHandleError> {
+ pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor, remote_network_address: Option<SocketAddress>) -> Result<Vec<u8>, PeerHandleError> {
let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
let res = peer_encryptor.get_act_one(&self.secp_ctx).to_vec();
let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
channel_encryptor: peer_encryptor,
their_node_id: None,
their_features: None,
- their_net_address: remote_network_address,
+ their_socket_address: remote_network_address,
pending_outbound_buffer: LinkedList::new(),
pending_outbound_buffer_first_msg_offset: 0,
/// [`socket_disconnected`].
///
/// [`socket_disconnected`]: PeerManager::socket_disconnected
- pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<NetAddress>) -> Result<(), PeerHandleError> {
+ pub fn new_inbound_connection(&self, descriptor: Descriptor, remote_network_address: Option<SocketAddress>) -> Result<(), PeerHandleError> {
let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.node_signer);
let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
channel_encryptor: peer_encryptor,
their_node_id: None,
their_features: None,
- their_net_address: remote_network_address,
+ their_socket_address: remote_network_address,
pending_outbound_buffer: LinkedList::new(),
pending_outbound_buffer_first_msg_offset: 0,
insert_node_id!();
let features = self.init_features(&their_node_id);
let networks = self.message_handler.chan_handler.get_genesis_hashes();
- let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+ let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) };
self.enqueue_message(peer, &resp);
peer.awaiting_pong_timer_tick_intervals = 0;
},
insert_node_id!();
let features = self.init_features(&their_node_id);
let networks = self.message_handler.chan_handler.get_genesis_hashes();
- let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_net_address.clone()) };
+ let resp = msgs::Init { features, networks, remote_network_address: filter_addresses(peer.their_socket_address.clone()) };
self.enqueue_message(peer, &resp);
peer.awaiting_pong_timer_tick_intervals = 0;
},
// be absurd. We ensure this by checking that at least 100 (our stated public contract on when
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
- const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+ const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (SocketAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
/// Panics if `addresses` is absurdly large (more than 100).
///
/// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
- pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
+ pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<SocketAddress>) {
if addresses.len() > 100 {
panic!("More than half the message size was taken up by public addresses!");
}
use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
use crate::ln::peer_handler::{CustomMessageHandler, PeerManager, MessageHandler, SocketDescriptor, IgnoringMessageHandler, filter_addresses};
use crate::ln::{msgs, wire};
- use crate::ln::msgs::{LightningError, NetAddress};
+ use crate::ln::msgs::{LightningError, SocketAddress};
use crate::util::test_utils;
use bitcoin::Network;
fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+ let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
let id_b = peer_b.node_signer.get_node_id(Recipient::Node).unwrap();
let mut fd_b = FileDescriptor {
fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+ let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
fd: $id + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+ let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
let mut fd_b = FileDescriptor {
fd: $id + ctr * 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+ let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
let initial_data = peers[1].new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
peers[0].new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
if peers[0].read_event(&mut fd_a, &initial_data).is_err() { break; }
fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+ let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
let mut fd_b = FileDescriptor {
fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+ let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_a = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1000};
+ let addr_a = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1000};
let mut fd_b = FileDescriptor {
fd: 1, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_b = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1001};
+ let addr_b = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1001};
let initial_data = peer_b.new_outbound_connection(id_a, fd_b.clone(), Some(addr_a.clone())).unwrap();
peer_a.new_inbound_connection(fd_a.clone(), Some(addr_b.clone())).unwrap();
assert_eq!(peer_a.read_event(&mut fd_a, &initial_data).unwrap(), false);
fd: 3, outbound_data: Arc::new(Mutex::new(Vec::new())),
disconnect: Arc::new(AtomicBool::new(false)),
};
- let addr_dup = NetAddress::IPv4{addr: [127, 0, 0, 1], port: 1003};
+ let addr_dup = SocketAddress::TcpIpV4{addr: [127, 0, 0, 1], port: 1003};
let id_a = cfgs[0].node_signer.get_node_id(Recipient::Node).unwrap();
peers[0].new_inbound_connection(fd_dup.clone(), Some(addr_dup.clone())).unwrap();
// Tests the filter_addresses function.
// For (10/8)
- let ip_address = NetAddress::IPv4{addr: [10, 0, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [10, 0, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [10, 0, 255, 201], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [10, 0, 255, 201], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [10, 255, 255, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [10, 255, 255, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For (0/8)
- let ip_address = NetAddress::IPv4{addr: [0, 0, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [0, 0, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [0, 0, 255, 187], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [0, 0, 255, 187], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [0, 255, 255, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [0, 255, 255, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For (100.64/10)
- let ip_address = NetAddress::IPv4{addr: [100, 64, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [100, 64, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [100, 78, 255, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [100, 78, 255, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [100, 127, 255, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [100, 127, 255, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For (127/8)
- let ip_address = NetAddress::IPv4{addr: [127, 0, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [127, 0, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [127, 65, 73, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [127, 65, 73, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [127, 255, 255, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [127, 255, 255, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For (169.254/16)
- let ip_address = NetAddress::IPv4{addr: [169, 254, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [169, 254, 221, 101], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 221, 101], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [169, 254, 255, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [169, 254, 255, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For (172.16/12)
- let ip_address = NetAddress::IPv4{addr: [172, 16, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [172, 16, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [172, 27, 101, 23], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [172, 27, 101, 23], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [172, 31, 255, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [172, 31, 255, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For (192.168/16)
- let ip_address = NetAddress::IPv4{addr: [192, 168, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [192, 168, 205, 159], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 205, 159], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [192, 168, 255, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [192, 168, 255, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For (192.88.99/24)
- let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 140], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 140], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv4{addr: [192, 88, 99, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [192, 88, 99, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For other IPv4 addresses
- let ip_address = NetAddress::IPv4{addr: [188, 255, 99, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [188, 255, 99, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
- let ip_address = NetAddress::IPv4{addr: [123, 8, 129, 14], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [123, 8, 129, 14], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
- let ip_address = NetAddress::IPv4{addr: [2, 88, 9, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV4{addr: [2, 88, 9, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
// For (2000::/3)
- let ip_address = NetAddress::IPv6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV6{addr: [32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
- let ip_address = NetAddress::IPv6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV6{addr: [45, 34, 209, 190, 0, 123, 55, 34, 0, 0, 3, 27, 201, 0, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
- let ip_address = NetAddress::IPv6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
+ let ip_address = SocketAddress::TcpIpV6{addr: [63, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), Some(ip_address.clone()));
// For other IPv6 addresses
- let ip_address = NetAddress::IPv6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV6{addr: [24, 240, 12, 32, 0, 0, 0, 0, 20, 97, 0, 32, 121, 254, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV6{addr: [68, 23, 56, 63, 0, 0, 2, 7, 75, 109, 0, 39, 0, 0, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
- let ip_address = NetAddress::IPv6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
+ let ip_address = SocketAddress::TcpIpV6{addr: [101, 38, 140, 230, 100, 0, 30, 98, 0, 26, 0, 0, 57, 96, 0, 0], port: 1000};
assert_eq!(filter_addresses(Some(ip_address.clone())), None);
// For (None)
{
let mut node_0_per_peer_lock;
let mut node_0_peer_state_lock;
- get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context.closing_fee_limits.as_mut().unwrap().1 *= 10;
+ get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_id).context_mut().closing_fee_limits.as_mut().unwrap().1 *= 10;
}
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
#[cfg(feature = "std")]
use std::time::SystemTime;
-const DEFAULT_RELATIVE_EXPIRY: Duration = Duration::from_secs(7200);
+pub(crate) const DEFAULT_RELATIVE_EXPIRY: Duration = Duration::from_secs(7200);
/// Tag for the hash function used when signing a [`Bolt12Invoice`]'s merkle root.
pub const SIGNATURE_TAG: &'static str = concat!("lightning", "invoice", "signature");
pub mod refund;
pub(crate) mod signer;
#[cfg(test)]
-mod test_utils;
+pub(crate) mod test_utils;
use crate::offers::invoice::BlindedPayInfo;
use crate::offers::merkle::TaggedHash;
-pub(super) fn payer_keys() -> KeyPair {
+pub(crate) fn payer_keys() -> KeyPair {
let secp_ctx = Secp256k1::new();
KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap())
}
-pub(super) fn payer_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
+pub(crate) fn payer_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
let secp_ctx = Secp256k1::new();
let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
}
-pub(super) fn payer_pubkey() -> PublicKey {
+pub(crate) fn payer_pubkey() -> PublicKey {
payer_keys().public_key()
}
-pub(super) fn recipient_keys() -> KeyPair {
+pub(crate) fn recipient_keys() -> KeyPair {
let secp_ctx = Secp256k1::new();
KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap())
}
-pub(super) fn recipient_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
+pub(crate) fn recipient_sign<T: AsRef<TaggedHash>>(message: &T) -> Result<Signature, Infallible> {
let secp_ctx = Secp256k1::new();
let keys = KeyPair::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[43; 32]).unwrap());
Ok(secp_ctx.sign_schnorr_no_aux_rand(message.as_ref().as_digest(), &keys))
}
-pub(super) fn recipient_pubkey() -> PublicKey {
+pub(crate) fn recipient_pubkey() -> PublicKey {
recipient_keys().public_key()
}
SecretKey::from_slice(&[byte; 32]).unwrap()
}
-pub(super) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
+pub(crate) fn payment_paths() -> Vec<(BlindedPayInfo, BlindedPath)> {
let paths = vec![
BlindedPath {
introduction_node_id: pubkey(40),
payinfo.into_iter().zip(paths.into_iter()).collect()
}
-pub(super) fn payment_hash() -> PaymentHash {
+pub(crate) fn payment_hash() -> PaymentHash {
PaymentHash([42; 32])
}
-pub(super) fn now() -> Duration {
+pub(crate) fn now() -> Duration {
std::time::SystemTime::now()
.duration_since(std::time::SystemTime::UNIX_EPOCH)
.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH")
}
-pub(super) struct FixedEntropy;
+pub(crate) struct FixedEntropy;
impl EntropySource for FixedEntropy {
fn get_secure_random_bytes(&self) -> [u8; 32] {
use crate::events::{MessageSendEvent, MessageSendEventsProvider};
use crate::ln::ChannelId;
use crate::ln::features::{ChannelFeatures, NodeFeatures, InitFeatures};
-use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, NetAddress, MAX_VALUE_MSAT};
+use crate::ln::msgs::{DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress, MAX_VALUE_MSAT};
use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, GossipTimestampFilter};
use crate::ln::msgs::{QueryChannelRange, ReplyChannelRange, QueryShortChannelIds, ReplyShortChannelIdsEnd};
use crate::ln::msgs;
impl NodeAnnouncementInfo {
/// Internet-level addresses via which one can connect to the node
- pub fn addresses(&self) -> &[NetAddress] {
+ pub fn addresses(&self) -> &[SocketAddress] {
self.announcement_message.as_ref()
.map(|msg| msg.contents.addresses.as_slice())
.unwrap_or_default()
impl Writeable for NodeAnnouncementInfo {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
- let empty_addresses = Vec::<NetAddress>::new();
+ let empty_addresses = Vec::<SocketAddress>::new();
write_tlv_fields!(writer, {
(0, self.features, required),
(2, self.last_update, required),
(8, announcement_message, option),
(10, _addresses, optional_vec), // deprecated, not used anymore
});
- let _: Option<Vec<NetAddress>> = _addresses;
+ let _: Option<Vec<SocketAddress>> = _addresses;
Ok(Self { features: features.0.unwrap(), last_update: last_update.0.unwrap(), rgb: rgb.0.unwrap(),
alias: alias.0.unwrap(), announcement_message })
}
}
// A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is
-// necessary to maintain compatibility with previous serializations of `NetAddress` that have an
+// necessary to maintain compatibility with previous serializations of `SocketAddress` that have an
// invalid hostname set. We ignore and eat all errors until we are either able to read a
// `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end.
struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo);
/// Get network addresses by node id.
/// Returns None if the requested node is completely unknown,
/// or if node announcement for the node was never received.
- pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<NetAddress>> {
+ pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<SocketAddress>> {
self.nodes.get(&NodeId::from_pubkey(&pubkey))
.and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec()))
}
// You may not use this file except in accordance with one or both of these
// licenses.
-//! This module contains a simple key-value store trait KVStorePersister that
+//! This module contains a simple key-value store trait [`KVStore`] that
//! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
//! and [`ChannelMonitor`] all in one place.
use core::ops::Deref;
-use bitcoin::hashes::hex::ToHex;
+use bitcoin::hashes::hex::{FromHex, ToHex};
+use bitcoin::{BlockHash, Txid};
+
use crate::io;
+use crate::prelude::{Vec, String};
use crate::routing::scoring::WriteableScore;
use crate::chain;
use crate::ln::channelmanager::ChannelManager;
use crate::routing::router::Router;
use crate::routing::gossip::NetworkGraph;
-use super::{logger::Logger, ser::Writeable};
-
-/// Trait for a key-value store for persisting some writeable object at some key
-/// Implementing `KVStorePersister` provides auto-implementations for [`Persister`]
-/// and [`Persist`] traits. It uses "manager", "network_graph",
-/// and "monitors/{funding_txo_id}_{funding_txo_index}" for keys.
-pub trait KVStorePersister {
- /// Persist the given writeable using the provided key
- fn persist<W: Writeable>(&self, key: &str, object: &W) -> io::Result<()>;
+use crate::util::logger::Logger;
+use crate::util::ser::{ReadableArgs, Writeable};
+
+/// The alphabet of characters allowed for namespaces and keys.
+pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
+
+/// The maximum number of characters namespaces and keys may have.
+pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
+
+/// The namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`ChannelManager`] will be persisted.
+pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
+
+/// The namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_NAMESPACE: &str = "monitors";
+/// The sub-namespace under which [`ChannelMonitor`]s will be persisted.
+pub const CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE: &str = "";
+
+/// The namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`NetworkGraph`] will be persisted.
+pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
+
+/// The namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_NAMESPACE: &str = "";
+/// The sub-namespace under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_SUB_NAMESPACE: &str = "";
+/// The key under which the [`WriteableScore`] will be persisted.
+pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
+
+/// Provides an interface that allows storage and retrieval of persisted values that are associated
+/// with given keys.
+///
+/// In order to avoid collisions the key space is segmented based on the given `namespace`s and
+/// `sub_namespace`s. Implementations of this trait are free to handle them in different ways, as
+/// long as per-namespace key uniqueness is asserted.
+///
+/// Keys and namespaces are required to be valid ASCII strings in the range of
+/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
+/// namespaces and sub-namespaces (`""`) are assumed to be valid, however, if `namespace` is
+/// empty, `sub_namespace` is required to be empty, too. This means that concerns should always be
+/// separated by namespace first, before sub-namespaces are used. While the number of namespaces
+/// will be relatively small and is determined at compile time, there may be many sub-namespaces
+/// per namespace. Note that per-namespace uniqueness needs to also hold for keys *and*
+/// namespaces/sub-namespaces in any given namespace/sub-namespace, i.e., conflicts between keys
+/// and equally named namespaces/sub-namespaces must be avoided.
+///
+/// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
+/// interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the
+/// data model previously assumed by `KVStorePersister::persist`.
+pub trait KVStore {
+ /// Returns the data stored for the given `namespace`, `sub_namespace`, and `key`.
+ ///
+ /// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
+ /// `namespace` and `sub_namespace`.
+ ///
+ /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
+ fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>>;
+ /// Persists the given data under the given `key`.
+ ///
+ /// Will create the given `namespace` and `sub_namespace` if not already present in the store.
+ fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()>;
+ /// Removes any data that had previously been persisted under the given `key`.
+ ///
+ /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
+ /// remove the given `key` at some point in time after the method returns, e.g., as part of an
+ /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
+ /// [`KVStore::list`] might include the removed key until the changes are actually persisted.
+ ///
+ /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
+ /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
+ /// potentially get lost on crash after the method returns. Therefore, this flag should only be
+ /// set for `remove` operations that can be safely replayed at a later time.
+ ///
+ /// Returns successfully if no data will be stored for the given `namespace`, `sub_namespace`, and
+	/// `key`, independently of whether it was present before its invocation or not.
+ fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, lazy: bool) -> io::Result<()>;
+ /// Returns a list of keys that are stored under the given `sub_namespace` in `namespace`.
+ ///
+ /// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
+ /// returned keys. Returns an empty list if `namespace` or `sub_namespace` is unknown.
+ fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>>;
}
/// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
}
-impl<'a, A: KVStorePersister, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
+
+// Blanket implementation: any `KVStore` acts as a `Persister` by encoding each object and
+// writing the bytes under the corresponding well-known namespace/sub-namespace/key constants.
+impl<'a, A: KVStore, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref, S: WriteableScore<'a>> Persister<'a, M, T, ES, NS, SP, F, R, L, S> for A
where M::Target: 'static + chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: 'static + BroadcasterInterface,
ES::Target: 'static + EntropySource,
R::Target: 'static + Router,
L::Target: 'static + Logger,
{
- /// Persist the given ['ChannelManager'] to disk with the name "manager", returning an error if persistence failed.
+ /// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
fn persist_manager(&self, channel_manager: &ChannelManager<M, T, ES, NS, SP, F, R, L>) -> Result<(), io::Error> {
- self.persist("manager", channel_manager)
+ self.write(CHANNEL_MANAGER_PERSISTENCE_NAMESPACE,
+ CHANNEL_MANAGER_PERSISTENCE_SUB_NAMESPACE,
+ CHANNEL_MANAGER_PERSISTENCE_KEY,
+ &channel_manager.encode())
}
- /// Persist the given [`NetworkGraph`] to disk with the name "network_graph", returning an error if persistence failed.
+ /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
- self.persist("network_graph", network_graph)
+ self.write(NETWORK_GRAPH_PERSISTENCE_NAMESPACE,
+ NETWORK_GRAPH_PERSISTENCE_SUB_NAMESPACE,
+ NETWORK_GRAPH_PERSISTENCE_KEY,
+ &network_graph.encode())
}
- /// Persist the given [`WriteableScore`] to disk with name "scorer", returning an error if persistence failed.
+ /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
- self.persist("scorer", &scorer)
+ self.write(SCORER_PERSISTENCE_NAMESPACE,
+ SCORER_PERSISTENCE_SUB_NAMESPACE,
+ SCORER_PERSISTENCE_KEY,
+ &scorer.encode())
}
}
-impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStorePersister> Persist<ChannelSigner> for K {
+// Blanket implementation: any `KVStore` persists `ChannelMonitor`s under the monitor
+// namespace, keyed by "<funding_txid>_<funding_output_index>".
+impl<ChannelSigner: WriteableEcdsaChannelSigner, K: KVStore> Persist<ChannelSigner> for K {
// TODO: We really need a way for the persister to inform the user that its time to crash/shut
// down once these start returning failure.
// A PermanentFailure implies we should probably just shut down the node since we're
// force-closing channels without even broadcasting!
fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
- let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
- match self.persist(&key, monitor) {
+ // Any write failure maps to PermanentFailure; see the TODO above.
+ let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+ match self.write(
+ CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
+ CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+ &key, &monitor.encode())
+ {
Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
}
}
fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
- let key = format!("monitors/{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
- match self.persist(&key, monitor) {
+ // The incremental `_update` is ignored: the full monitor is re-serialized and
+ // re-written under the same key on every call.
+ let key = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
+ match self.write(
+ CHANNEL_MONITOR_PERSISTENCE_NAMESPACE,
+ CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE,
+ &key, &monitor.encode())
+ {
Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
Err(_) => chain::ChannelMonitorUpdateStatus::PermanentFailure,
}
}
}
+
+/// Read previously persisted [`ChannelMonitor`]s from the store.
+///
+/// Lists all keys under the channel monitor namespace, expecting each key to be of the form
+/// `<funding_txid_hex>_<funding_output_index>`. Returns an `InvalidData` error if a key is
+/// malformed, a monitor fails to deserialize, or a deserialized monitor's funding outpoint
+/// does not match the key it was stored under.
+pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
+ kv_store: K, entropy_source: ES, signer_provider: SP,
+) -> io::Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>>
+where
+ K::Target: KVStore,
+ ES::Target: EntropySource + Sized,
+ SP::Target: SignerProvider + Sized,
+{
+ let mut res = Vec::new();
+
+ for stored_key in kv_store.list(
+ CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE)?
+ {
+ // A valid key is "<64 hex chars>_<at least one digit>", i.e. at least 66 characters.
+ if stored_key.len() < 66 {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "Stored key has invalid length"));
+ }
+
+ let txid = Txid::from_hex(stored_key.split_at(64).0).map_err(|_| {
+ io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
+ })?;
+
+ // Everything past position 65 is parsed as the funding output index; position 64 holds
+ // the '_' separator. NOTE(review): the separator character itself is never validated.
+ let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
+ io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
+ })?;
+
+ match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::Signer>)>::read(
+ &mut io::Cursor::new(
+ kv_store.read(CHANNEL_MONITOR_PERSISTENCE_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SUB_NAMESPACE, &stored_key)?),
+ (&*entropy_source, &*signer_provider),
+ ) {
+ Ok((block_hash, channel_monitor)) => {
+ // Reject monitors stored under a key that does not match their actual
+ // funding outpoint.
+ if channel_monitor.get_funding_txo().0.txid != txid
+ || channel_monitor.get_funding_txo().0.index != index
+ {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "ChannelMonitor was stored under the wrong key",
+ ));
+ }
+ res.push((block_hash, channel_monitor));
+ }
+ Err(_) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "Failed to deserialize ChannelMonitor"
+ ))
+ }
+ }
+ }
+ Ok(res)
+}
use crate::util::test_channel_signer::{TestChannelSigner, EnforcementState};
use crate::util::logger::{Logger, Level, Record};
use crate::util::ser::{Readable, ReadableArgs, Writer, Writeable};
+use crate::util::persist::KVStore;
use bitcoin::EcdsaSighashType;
use bitcoin::blockdata::constants::ChainHash;
}
}
+// An in-memory `KVStore` test double.
+pub(crate) struct TestStore {
+ // Maps "namespace" or "namespace/sub_namespace" -> (key -> stored bytes).
+ persisted_bytes: Mutex<HashMap<String, HashMap<String, Vec<u8>>>>,
+ // When `true`, mutating operations (`write`/`remove`) fail with `PermissionDenied`.
+ read_only: bool,
+}
+
+impl TestStore {
+ // Creates an empty store; `read_only` controls whether mutating operations are rejected.
+ pub fn new(read_only: bool) -> Self {
+ let persisted_bytes = Mutex::new(HashMap::new());
+ Self { persisted_bytes, read_only }
+ }
+}
+
+impl KVStore for TestStore {
+ fn read(&self, namespace: &str, sub_namespace: &str, key: &str) -> io::Result<Vec<u8>> {
+ let persisted_lock = self.persisted_bytes.lock().unwrap();
+ // Namespace and sub-namespace are flattened into a single "ns[/sub_ns]" outer map key.
+ let prefixed = if sub_namespace.is_empty() {
+ namespace.to_string()
+ } else {
+ format!("{}/{}", namespace, sub_namespace)
+ };
+
+ // Distinguish an unknown namespace from an unknown key, but return NotFound for both.
+ if let Some(outer_ref) = persisted_lock.get(&prefixed) {
+ if let Some(inner_ref) = outer_ref.get(key) {
+ let bytes = inner_ref.clone();
+ Ok(bytes)
+ } else {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Key not found"))
+ }
+ } else {
+ Err(io::Error::new(io::ErrorKind::NotFound, "Namespace not found"))
+ }
+ }
+
+ fn write(&self, namespace: &str, sub_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> {
+ // Simulate a store that rejects all mutation.
+ if self.read_only {
+ return Err(io::Error::new(
+ io::ErrorKind::PermissionDenied,
+ "Cannot modify read-only store",
+ ));
+ }
+ let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+ let prefixed = if sub_namespace.is_empty() {
+ namespace.to_string()
+ } else {
+ format!("{}/{}", namespace, sub_namespace)
+ };
+ // Creates the (sub-)namespace on demand and overwrites any existing value for `key`.
+ let outer_e = persisted_lock.entry(prefixed).or_insert(HashMap::new());
+ let mut bytes = Vec::new();
+ bytes.write_all(buf)?;
+ outer_e.insert(key.to_string(), bytes);
+ Ok(())
+ }
+
+ fn remove(&self, namespace: &str, sub_namespace: &str, key: &str, _lazy: bool) -> io::Result<()> {
+ if self.read_only {
+ return Err(io::Error::new(
+ io::ErrorKind::PermissionDenied,
+ "Cannot modify read-only store",
+ ));
+ }
+
+ let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+ let prefixed = if sub_namespace.is_empty() {
+ namespace.to_string()
+ } else {
+ format!("{}/{}", namespace, sub_namespace)
+ };
+ // Removing a missing key or namespace is a successful no-op, per the trait contract.
+ if let Some(outer_ref) = persisted_lock.get_mut(&prefixed) {
+ outer_ref.remove(&key.to_string());
+ }
+
+ Ok(())
+ }
+
+ fn list(&self, namespace: &str, sub_namespace: &str) -> io::Result<Vec<String>> {
+ let mut persisted_lock = self.persisted_bytes.lock().unwrap();
+
+ let prefixed = if sub_namespace.is_empty() {
+ namespace.to_string()
+ } else {
+ format!("{}/{}", namespace, sub_namespace)
+ };
+ // entry() is used only to inspect; the Vacant arm does not insert. An unknown
+ // (sub-)namespace yields an empty list rather than an error.
+ match persisted_lock.entry(prefixed) {
+ hash_map::Entry::Occupied(e) => Ok(e.get().keys().cloned().collect()),
+ hash_map::Entry::Vacant(_) => Ok(Vec::new()),
+ }
+ }
+}
+
pub struct TestBroadcaster {
pub txn_broadcasted: Mutex<Vec<Transaction>>,
pub blocks: Arc<Mutex<Vec<(Block, u32)>>>,
--- /dev/null
+## Backwards Compatibility
+
+* If an `Event::InvoiceRequestFailed` was generated for a BOLT 12 payment (#2371), downgrading will result in the payment silently failing if the event had not been processed yet.
--- /dev/null
+## Backwards Compatibility
+
+* Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister` interface can use a concatenation of `[{namespace}/[{sub_namespace}/]]{key}` to recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
--- /dev/null
+* The `NetAddress` enum has been renamed to `SocketAddress`. The variants `IPv4` and `IPv6` have also been renamed to `TcpIpV4` and `TcpIpV6` (#2358).