import org.ldk.util.TwoTuple;
import java.io.IOException;
+import java.util.HashSet;
/**
* An Exception that indicates the serialized data is invalid and has been corrupted on disk. You should attempt to
* restore from a backup if there is one which is known to be current. Otherwise, funds may have been lost.
*/
- public static class InvalidSerializedDataException extends Exception {}
+ public static class InvalidSerializedDataException extends Exception {
+ InvalidSerializedDataException(String reason) {
+ super(reason);
+ }
+ }
/**
* The ChannelManager either deserialized or newly-constructed.
* A NioPeerHandler which manages a background thread to handle socket events and pass them to the peer_manager.
*/
public final NioPeerHandler nio_peer_handler;
+ /**
+ * If a `NetworkGraph` is provided to the constructor *and* a `LockableScore` is provided to
+ * `chain_sync_completed`, this will be non-null after `chain_sync_completed` returns.
+ *
+ * It should be used to send payments instead of doing so directly via the `channel_manager`.
+ *
+ * When payments are made through this, they are automatically retried and the provided Scorer
+ * will be updated with payment failure data.
+ */
+ @Nullable public InvoicePayer payer;
private final ChainMonitor chain_monitor;
-
+ @Nullable private final NetworkGraph net_graph;
+ @Nullable private final NetGraphMsgHandler graph_msg_handler;
private final Logger logger;
- public final @Nullable NetGraphMsgHandler router;
-
/**
* Deserializes a channel manager and a set of channel monitors from the given serialized copies and interface implementations
*
* Note that if the provided Watch is a ChainWatch and has an associated filter, the previously registered
* outputs will be loaded when chain_sync_completed is called.
*/
- public ChannelManagerConstructor(byte[] channel_manager_serialized, byte[][] channel_monitors_serialized,
- KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor, @Nullable Filter filter,
- @Nullable NetGraphMsgHandler router,
+ public ChannelManagerConstructor(byte[] channel_manager_serialized, byte[][] channel_monitors_serialized, UserConfig config,
+ KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor,
+ @Nullable Filter filter, @Nullable NetworkGraph net_graph,
BroadcasterInterface tx_broadcaster, Logger logger) throws InvalidSerializedDataException {
final IgnoringMessageHandler no_custom_messages = IgnoringMessageHandler.of();
final ChannelMonitor[] monitors = new ChannelMonitor[channel_monitors_serialized.length];
this.channel_monitors = new TwoTuple_BlockHashChannelMonitorZ[monitors.length];
+ HashSet<OutPoint> monitor_funding_set = new HashSet<>();
for (int i = 0; i < monitors.length; i++) {
Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res = UtilMethods.C2Tuple_BlockHashChannelMonitorZ_read(channel_monitors_serialized[i], keys_interface);
if (res instanceof Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_Err) {
- throw new InvalidSerializedDataException();
+ throw new InvalidSerializedDataException("Serialized ChannelMonitor was corrupt");
}
byte[] block_hash = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK)res).res.get_a();
monitors[i] = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) res).res.get_b();
this.channel_monitors[i] = TwoTuple_BlockHashChannelMonitorZ.of(block_hash, monitors[i]);
+ if (!monitor_funding_set.add(monitors[i].get_funding_txo().get_a()))
+ throw new InvalidSerializedDataException("Set of ChannelMonitors contained duplicates (ie the same funding_txo was set on multiple monitors)");
}
Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ res =
UtilMethods.C2Tuple_BlockHashChannelManagerZ_read(channel_manager_serialized, keys_interface, fee_estimator, chain_monitor.as_Watch(), tx_broadcaster,
- logger, UserConfig.with_default(), monitors);
+ logger, config, monitors);
if (res instanceof Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_Err) {
- throw new InvalidSerializedDataException();
+ throw new InvalidSerializedDataException("Serialized ChannelManager was corrupt");
}
this.channel_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.get_b();
this.channel_manager_latest_block_hash = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.get_a();
this.chain_monitor = chain_monitor;
- this.router = router;
this.logger = logger;
byte[] random_data = keys_interface.get_secure_random_bytes();
- if (router != null) {
- this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(),
+ this.net_graph = net_graph;
+ if (net_graph != null) {
+ //TODO: We really need to expose the Access here to let users prevent DoS issues
+ this.graph_msg_handler = NetGraphMsgHandler.of(net_graph, Option_AccessZ.none(), logger);
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
+ graph_msg_handler.as_RoutingMessageHandler(),
keys_interface.get_node_secret(), random_data, logger, no_custom_messages.as_CustomMessageHandler());
} else {
- this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), (IgnoringMessageHandler.of()).as_RoutingMessageHandler(),
+ this.graph_msg_handler = null;
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), no_custom_messages.as_RoutingMessageHandler(),
keys_interface.get_node_secret(), random_data, logger, no_custom_messages.as_CustomMessageHandler());
}
NioPeerHandler nio_peer_handler = null;
*/
public ChannelManagerConstructor(Network network, UserConfig config, byte[] current_blockchain_tip_hash, int current_blockchain_tip_height,
KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor,
- @Nullable NetGraphMsgHandler router,
+ @Nullable NetworkGraph net_graph,
BroadcasterInterface tx_broadcaster, Logger logger) {
final IgnoringMessageHandler no_custom_messages = IgnoringMessageHandler.of();
channel_monitors = new TwoTuple_BlockHashChannelMonitorZ[0];
channel_manager_latest_block_hash = null;
this.chain_monitor = chain_monitor;
- this.router = router;
BestBlock block = BestBlock.of(current_blockchain_tip_hash, current_blockchain_tip_height);
ChainParameters params = ChainParameters.of(network, block);
channel_manager = ChannelManager.of(fee_estimator, chain_monitor.as_Watch(), tx_broadcaster, logger, keys_interface, config, params);
this.logger = logger;
byte[] random_data = keys_interface.get_secure_random_bytes();
- if (router != null) {
- this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(),
+ this.net_graph = net_graph;
+ if (net_graph != null) {
+ //TODO: We really need to expose the Access here to let users prevent DoS issues
+ this.graph_msg_handler = NetGraphMsgHandler.of(net_graph, Option_AccessZ.none(), logger);
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
+ graph_msg_handler.as_RoutingMessageHandler(),
keys_interface.get_node_secret(), random_data, logger, no_custom_messages.as_CustomMessageHandler());
} else {
- this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), (IgnoringMessageHandler.of()).as_RoutingMessageHandler(),
+ this.graph_msg_handler = null;
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), no_custom_messages.as_RoutingMessageHandler(),
keys_interface.get_node_secret(), random_data, logger, no_custom_messages.as_CustomMessageHandler());
}
NioPeerHandler nio_peer_handler = null;
* This also spawns a background thread which will call the appropriate methods on the provided
* EventHandler as required.
*/
- public void chain_sync_completed(EventHandler event_handler) {
+ public void chain_sync_completed(EventHandler event_handler, @Nullable MultiThreadedLockableScore scorer) {
if (background_processor != null) { return; }
for (TwoTuple_BlockHashChannelMonitorZ monitor: channel_monitors) {
this.chain_monitor.as_Watch().watch_channel(monitor.get_b().get_funding_txo().get_a(), monitor.get_b());
}
+ org.ldk.structs.EventHandler ldk_handler = org.ldk.structs.EventHandler.new_impl(event_handler::handle_event);
+ if (this.net_graph != null && scorer != null) {
+ Router router = DefaultRouter.of(net_graph, logger).as_Router();
+ this.payer = InvoicePayer.of(this.channel_manager.as_Payer(), router, scorer, this.logger, ldk_handler, RetryAttempts.of(3));
+ assert this.payer != null;
+ ldk_handler = this.payer.as_EventHandler();
+ }
+
background_processor = BackgroundProcessor.start(org.ldk.structs.ChannelManagerPersister.new_impl(channel_manager -> {
event_handler.persist_manager(channel_manager.write());
return Result_NoneErrorZ.ok();
- }), org.ldk.structs.EventHandler.new_impl(event_handler::handle_event),
- this.chain_monitor, this.channel_manager, this.router, this.peer_manager, this.logger);
+ }), ldk_handler, this.chain_monitor, this.channel_manager, this.graph_msg_handler, this.peer_manager, this.logger);
}
/**