package org.ldk.batteries;
-import org.jetbrains.annotations.Nullable;
-import org.ldk.enums.LDKNetwork;
+import javax.annotation.Nullable;
+import org.ldk.enums.Network;
+import org.ldk.enums.Recipient;
import org.ldk.structs.*;
-import org.ldk.util.TwoTuple;
+
+import java.io.IOException;
+import java.util.HashSet;
/**
* A simple utility class which assists in constructing a fresh or deserializing from disk a ChannelManager and one or
* more ChannelMonitors.
+ *
+ * Also constructs a PeerManager and spawns a background thread to monitor for and notify you of relevant Events.
+ *
+ * Note that you must hold a reference to any constructed ChannelManagerConstructor objects in order to continue to
+ * receive events generated by the background thread, which will be stopped if this object is garbage collected.
*/
public class ChannelManagerConstructor {
/**
* An Exception that indicates the serialized data is invalid and has been corrupted on disk. You should attempt to
* restore from a backup if there is one which is known to be current. Otherwise, funds may have been lost.
*/
- public static class InvalidSerializedDataException extends Exception {}
+ public static class InvalidSerializedDataException extends Exception {
+ InvalidSerializedDataException(String reason) {
+ super(reason);
+ }
+ }
/**
* The ChannelManager either deserialized or newly-constructed.
* After doing so (and syncing the blockchain on the channel manager as well), you should call chain_sync_completed()
* and then continue to normal application operation.
*/
- public final TwoTuple<ChannelMonitor, byte[]>[] channel_monitors;
+ public final TwoTuple_BlockHashChannelMonitorZ[] channel_monitors;
+ /**
+ * A PeerManager which is constructed to pass messages and handle connections to peers.
+ */
+ public final PeerManager peer_manager;
+ /**
+ * A NioPeerHandler which manages a background thread to handle socket events and pass them to the peer_manager.
+ *
+ * This is `null` until `chain_sync_completed` is called.
+ */
+ public NioPeerHandler nio_peer_handler = null;
+ /**
+ * If a `NetworkGraph` is provided to the constructor *and* a `LockableScore` is provided to
+ * `chain_sync_completed`, this will be non-null after `chain_sync_completed` returns.
+ *
+ * It should be used to send payments instead of doing so directly via the `channel_manager`.
+ *
+ * When payments are made through this, they are automatically retried and the provided Scorer
+ * will be updated with payment failure data.
+ */
+ @Nullable public InvoicePayer payer;
private final ChainMonitor chain_monitor;
+ /**
+ * The `NetworkGraph` deserialized from the bytes given to the deserializing constructor, or the `NetworkGraph`
+ * given explicitly to the new-object constructor.
+ */
+ @Nullable public final NetworkGraph net_graph;
+ @Nullable private final P2PGossipSync graph_msg_handler;
+ private final Logger logger;
+
+ private final byte[] router_rand_bytes;
+
/**
* Deserializes a channel manager and a set of channel monitors from the given serialized copies and interface implementations
*
* Note that if the provided Watch is a ChainWatch and has an associated filter, the previously registered
* outputs will be loaded when chain_sync_completed is called.
*/
- public ChannelManagerConstructor(byte[] channel_manager_serialized, byte[][] channel_monitors_serialized,
- KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor, @Nullable Filter filter,
+ public ChannelManagerConstructor(byte[] channel_manager_serialized, byte[][] channel_monitors_serialized, UserConfig config,
+ KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor,
+ @Nullable Filter filter, @Nullable byte[] net_graph_serialized,
BroadcasterInterface tx_broadcaster, Logger logger) throws InvalidSerializedDataException {
+ final IgnoringMessageHandler ignoring_handler = IgnoringMessageHandler.of();
final ChannelMonitor[] monitors = new ChannelMonitor[channel_monitors_serialized.length];
- this.channel_monitors = new TwoTuple[monitors.length];
+ this.channel_monitors = new TwoTuple_BlockHashChannelMonitorZ[monitors.length];
+ HashSet<OutPoint> monitor_funding_set = new HashSet();
for (int i = 0; i < monitors.length; i++) {
- Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res = UtilMethods.constructor_BlockHashChannelMonitorZ_read(channel_monitors_serialized[i], keys_interface);
+ Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res = UtilMethods.C2Tuple_BlockHashChannelMonitorZ_read(channel_monitors_serialized[i], keys_interface);
if (res instanceof Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_Err) {
- throw new InvalidSerializedDataException();
+ throw new InvalidSerializedDataException("Serialized ChannelMonitor was corrupt");
}
- monitors[i] = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) res).res.b;
- this.channel_monitors[i] = new TwoTuple<>(monitors[i], ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK)res).res.a);
+ byte[] block_hash = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK)res).res.get_a();
+ monitors[i] = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) res).res.get_b();
+ this.channel_monitors[i] = TwoTuple_BlockHashChannelMonitorZ.of(block_hash, monitors[i]);
+ if (!monitor_funding_set.add(monitors[i].get_funding_txo().get_a()))
+ throw new InvalidSerializedDataException("Set of ChannelMonitors contained duplicates (ie the same funding_txo was set on multiple monitors)");
}
Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ res =
- UtilMethods.constructor_BlockHashChannelManagerZ_read(channel_manager_serialized, keys_interface, fee_estimator, chain_monitor.as_Watch(), tx_broadcaster,
- logger, UserConfig.constructor_default(), monitors);
- if (res instanceof Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_Err) {
- throw new InvalidSerializedDataException();
+ UtilMethods.C2Tuple_BlockHashChannelManagerZ_read(channel_manager_serialized, keys_interface, fee_estimator, chain_monitor.as_Watch(), tx_broadcaster,
+ logger, config, monitors);
+ if (!res.is_ok()) {
+ throw new InvalidSerializedDataException("Serialized ChannelManager was corrupt");
}
- this.channel_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.b;
- this.channel_manager_latest_block_hash = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.a;
+ this.channel_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.get_b();
+ this.channel_manager_latest_block_hash = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.get_a();
this.chain_monitor = chain_monitor;
+ this.logger = logger;
+ byte[] random_data = keys_interface.get_secure_random_bytes();
+ if (net_graph_serialized != null) {
+ Result_NetworkGraphDecodeErrorZ graph_res = NetworkGraph.read(net_graph_serialized, logger);
+ if (!graph_res.is_ok()) {
+ throw new InvalidSerializedDataException("Serialized Network Graph was corrupt");
+ }
+ this.net_graph = ((Result_NetworkGraphDecodeErrorZ.Result_NetworkGraphDecodeErrorZ_OK)graph_res).res;
+ } else {
+ this.net_graph = null;
+ }
+ Result_SecretKeyNoneZ node_secret = keys_interface.get_node_secret(Recipient.LDKRecipient_Node);
+ assert node_secret.is_ok();
+ if (net_graph != null) {
+ //TODO: We really need to expose the Access here to let users prevent DoS issues
+ this.graph_msg_handler = P2PGossipSync.of(net_graph, Option_AccessZ.none(), logger);
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
+ graph_msg_handler.as_RoutingMessageHandler(),
+ ignoring_handler.as_OnionMessageHandler(),
+ ((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
+ (int)(System.currentTimeMillis() / 1000),
+ random_data, logger, ignoring_handler.as_CustomMessageHandler());
+ } else {
+ this.graph_msg_handler = null;
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
+ ignoring_handler.as_RoutingMessageHandler(),
+ ignoring_handler.as_OnionMessageHandler(),
+ ((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
+ (int)(System.currentTimeMillis() / 1000),
+ random_data, logger, ignoring_handler.as_CustomMessageHandler());
+ }
if (filter != null) {
for (ChannelMonitor monitor : monitors) {
monitor.load_outputs_to_watch(filter);
}
}
+ router_rand_bytes = keys_interface.get_secure_random_bytes();
}
/**
* Constructs a channel manager from the given interface implementations
*/
- public ChannelManagerConstructor(LDKNetwork network, UserConfig config, byte[] current_blockchain_tip_hash, int current_blockchain_tip_height,
+ public ChannelManagerConstructor(Network network, UserConfig config, byte[] current_blockchain_tip_hash, int current_blockchain_tip_height,
KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor,
- BroadcasterInterface tx_broadcaster, Logger logger) throws InvalidSerializedDataException {
- channel_monitors = new TwoTuple[0];
+ @Nullable NetworkGraph net_graph,
+ BroadcasterInterface tx_broadcaster, Logger logger) {
+ final IgnoringMessageHandler ignoring_handler = IgnoringMessageHandler.of();
+ channel_monitors = new TwoTuple_BlockHashChannelMonitorZ[0];
channel_manager_latest_block_hash = null;
this.chain_monitor = chain_monitor;
- channel_manager = ChannelManager.constructor_new(fee_estimator, chain_monitor.as_Watch(), tx_broadcaster, logger, keys_interface, config, network, current_blockchain_tip_hash, current_blockchain_tip_height);
+ BestBlock block = BestBlock.of(current_blockchain_tip_hash, current_blockchain_tip_height);
+ ChainParameters params = ChainParameters.of(network, block);
+ channel_manager = ChannelManager.of(fee_estimator, chain_monitor.as_Watch(), tx_broadcaster, logger, keys_interface, config, params);
+ this.logger = logger;
+ byte[] random_data = keys_interface.get_secure_random_bytes();
+ this.net_graph = net_graph;
+ Result_SecretKeyNoneZ node_secret = keys_interface.get_node_secret(Recipient.LDKRecipient_Node);
+ assert node_secret.is_ok();
+ if (net_graph != null) {
+ //TODO: We really need to expose the Access here to let users prevent DoS issues
+ this.graph_msg_handler = P2PGossipSync.of(net_graph, Option_AccessZ.none(), logger);
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
+ graph_msg_handler.as_RoutingMessageHandler(),
+ ignoring_handler.as_OnionMessageHandler(),
+ ((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
+ (int)(System.currentTimeMillis() / 1000),
+ random_data, logger, ignoring_handler.as_CustomMessageHandler());
+ } else {
+ this.graph_msg_handler = null;
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
+ ignoring_handler.as_RoutingMessageHandler(),
+ ignoring_handler.as_OnionMessageHandler(),
+ ((Result_SecretKeyNoneZ.Result_SecretKeyNoneZ_OK)node_secret).res,
+ (int)(System.currentTimeMillis() / 1000),
+ random_data, logger, ignoring_handler.as_CustomMessageHandler());
+ }
+ router_rand_bytes = keys_interface.get_secure_random_bytes();
}
/**
* Abstract interface which should handle Events and persist the ChannelManager. When you call chain_sync_completed
* a background thread is started which will automatically call these methods for you when events occur.
*/
- public interface ChannelManagerPersister {
- void handle_events(Event[] events);
+ public interface EventHandler {
+ void handle_event(Event events);
void persist_manager(byte[] channel_manager_bytes);
+ void persist_network_graph(byte[] network_graph);
+ void persist_scorer(byte[] scorer_bytes);
}
- Thread persister_thread = null;
- volatile boolean shutdown = false;
+ BackgroundProcessor background_processor = null;
/**
* Utility which adds all of the deserialized ChannelMonitors to the chain watch so that further updates from the
* ChannelManager are processed as normal.
*
* This also spawns a background thread which will call the appropriate methods on the provided
- * ChannelManagerPersister as required.
+ * EventHandler as required.
*/
- public void chain_sync_completed(ChannelManagerPersister persister) {
- if (persister_thread != null) { return; }
- for (TwoTuple<ChannelMonitor, byte[]> monitor: channel_monitors) {
- this.chain_monitor.as_Watch().watch_channel(monitor.a.get_funding_txo().a, monitor.a);
+ public void chain_sync_completed(EventHandler event_handler, @Nullable MultiThreadedLockableScore scorer) {
+ try {
+ this.nio_peer_handler = new NioPeerHandler(this.peer_manager);
+ } catch (IOException e) {
+ throw new IllegalStateException("We should never fail to construct nio objects unless we're on a platform that cannot run LDK.");
}
- persister_thread = new Thread(() -> {
- long lastTimerTick = System.currentTimeMillis();
- while (true) {
- boolean need_persist = this.channel_manager.await_persistable_update_timeout(1);
- Event[] events = this.channel_manager.as_EventsProvider().get_and_clear_pending_events();
- if (events.length != 0) {
- persister.handle_events(events);
- need_persist = true;
- }
- events = this.chain_monitor.as_EventsProvider().get_and_clear_pending_events();
- if (events.length != 0) {
- persister.handle_events(events);
- need_persist = true;
- }
- if (need_persist) {
- persister.persist_manager(this.channel_manager.write());
- }
- if (shutdown) {
- return;
- }
- if (lastTimerTick < System.currentTimeMillis() - 60 * 1000) {
- this.channel_manager.timer_chan_freshness_every_min();
- lastTimerTick = System.currentTimeMillis();
- }
+
+ if (background_processor != null) { return; }
+ for (TwoTuple_BlockHashChannelMonitorZ monitor: channel_monitors) {
+ this.chain_monitor.as_Watch().watch_channel(monitor.get_b().get_funding_txo().get_a(), monitor.get_b());
+ }
+ org.ldk.structs.EventHandler ldk_handler = org.ldk.structs.EventHandler.new_impl(event_handler::handle_event);
+ if (this.net_graph != null && scorer != null) {
+ Router router = DefaultRouter.of(net_graph, logger, router_rand_bytes, scorer.as_LockableScore()).as_Router();
+ this.payer = InvoicePayer.of(this.channel_manager.as_Payer(), router, this.logger, ldk_handler, Retry.attempts(3));
+ ldk_handler = this.payer.as_EventHandler();
+ }
+
+ GossipSync gossip_sync;
+ if (this.graph_msg_handler == null)
+ gossip_sync = GossipSync.none();
+ else
+ gossip_sync = GossipSync.p2_p(this.graph_msg_handler);
+
+ Option_WriteableScoreZ writeable_score;
+ if (scorer != null)
+ writeable_score = Option_WriteableScoreZ.some(scorer.as_WriteableScore());
+ else
+ writeable_score = Option_WriteableScoreZ.none();
+
+ background_processor = BackgroundProcessor.start(Persister.new_impl(new Persister.PersisterInterface() {
+ @Override
+ public Result_NoneErrorZ persist_manager(ChannelManager channel_manager) {
+ event_handler.persist_manager(channel_manager.write());
+ return Result_NoneErrorZ.ok();
+ }
+
+ @Override
+ public Result_NoneErrorZ persist_graph(NetworkGraph network_graph) {
+ event_handler.persist_network_graph(network_graph.write());
+ return Result_NoneErrorZ.ok();
}
- }, "NioPeerHandler NIO Thread");
- persister_thread.start();
+
+ @Override
+ public Result_NoneErrorZ persist_scorer(WriteableScore scorer) {
+ event_handler.persist_scorer(scorer.write());
+ return Result_NoneErrorZ.ok();
+ }
+ }), ldk_handler, this.chain_monitor, this.channel_manager, gossip_sync, this.peer_manager, this.logger, writeable_score);
}
/**
- * Interrupt the background thread, stopping the background handling of
+ * Interrupt the background thread, stopping the background handling of events.
*/
public void interrupt() {
- shutdown = true;
- try {
- persister_thread.join();
- } catch (InterruptedException ignored) { }
+ if (this.nio_peer_handler != null)
+ this.nio_peer_handler.interrupt();
+ this.background_processor.stop();
}
}