* After doing so (and syncing the blockchain on the channel manager as well), you should call chain_sync_completed()
* and then continue to normal application operation.
*/
- public final TwoTuple<ChannelMonitor, byte[]>[] channel_monitors;
+ public final TwoTuple_BlockHashChannelMonitorZ[] channel_monitors;
/**
* A PeerManager which is constructed to pass messages and handle connections to peers.
*/
* A NioPeerHandler which manages a background thread to handle socket events and pass them to the peer_manager.
*/
public final NioPeerHandler nio_peer_handler;
+ /**
+ * If a `NetworkGraph` is provided to the constructor *and* a `LockableScore` is provided to
+ * `chain_sync_completed`, this will be non-null after `chain_sync_completed` returns.
+ *
+ * It should be used to send payments instead of doing so directly via the `channel_manager`.
+ *
+ * When payments are made through this, they are automatically retried and the provided Scorer
+ * will be updated with payment failure data.
+ */
+ @Nullable public InvoicePayer payer;
private final ChainMonitor chain_monitor;
-
+ @Nullable private final NetworkGraph net_graph;
+ @Nullable private final NetGraphMsgHandler graph_msg_handler;
private final Logger logger;
/**
*/
public ChannelManagerConstructor(byte[] channel_manager_serialized, byte[][] channel_monitors_serialized,
KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor, @Nullable Filter filter,
- @Nullable NetGraphMsgHandler router,
+ @Nullable NetworkGraph net_graph,
BroadcasterInterface tx_broadcaster, Logger logger) throws InvalidSerializedDataException {
+ final IgnoringMessageHandler no_custom_messages = IgnoringMessageHandler.of();
final ChannelMonitor[] monitors = new ChannelMonitor[channel_monitors_serialized.length];
- this.channel_monitors = new TwoTuple[monitors.length];
+ this.channel_monitors = new TwoTuple_BlockHashChannelMonitorZ[monitors.length];
for (int i = 0; i < monitors.length; i++) {
- Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res = UtilMethods.BlockHashChannelMonitorZ_read(channel_monitors_serialized[i], keys_interface);
+ Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res = UtilMethods.C2Tuple_BlockHashChannelMonitorZ_read(channel_monitors_serialized[i], keys_interface);
if (res instanceof Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_Err) {
throw new InvalidSerializedDataException();
}
- monitors[i] = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) res).res.b;
- this.channel_monitors[i] = new TwoTuple<>(monitors[i], ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK)res).res.a);
+ byte[] block_hash = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK)res).res.get_a();
+ monitors[i] = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) res).res.get_b();
+ this.channel_monitors[i] = TwoTuple_BlockHashChannelMonitorZ.of(block_hash, monitors[i]);
}
Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ res =
- UtilMethods.BlockHashChannelManagerZ_read(channel_manager_serialized, keys_interface, fee_estimator, chain_monitor.as_Watch(), tx_broadcaster,
+ UtilMethods.C2Tuple_BlockHashChannelManagerZ_read(channel_manager_serialized, keys_interface, fee_estimator, chain_monitor.as_Watch(), tx_broadcaster,
logger, UserConfig.with_default(), monitors);
if (res instanceof Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_Err) {
throw new InvalidSerializedDataException();
}
- this.channel_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.b;
- this.channel_manager_latest_block_hash = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.a;
+ this.channel_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.get_b();
+ this.channel_manager_latest_block_hash = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.get_a();
this.chain_monitor = chain_monitor;
this.logger = logger;
byte[] random_data = keys_interface.get_secure_random_bytes();
- if (router != null) {
- this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ this.net_graph = net_graph;
+ if (net_graph != null) {
+ //TODO: We really need to expose the Access here to let users prevent DoS issues
+ this.graph_msg_handler = NetGraphMsgHandler.of(net_graph, Option_AccessZ.none(), logger);
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
+ graph_msg_handler.as_RoutingMessageHandler(),
+ keys_interface.get_node_secret(), random_data, logger, no_custom_messages.as_CustomMessageHandler());
} else {
- this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), (IgnoringMessageHandler.of()).as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ this.graph_msg_handler = null;
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), no_custom_messages.as_RoutingMessageHandler(),
+ keys_interface.get_node_secret(), random_data, logger, no_custom_messages.as_CustomMessageHandler());
}
NioPeerHandler nio_peer_handler = null;
try {
*/
public ChannelManagerConstructor(Network network, UserConfig config, byte[] current_blockchain_tip_hash, int current_blockchain_tip_height,
KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor,
- @Nullable NetGraphMsgHandler router,
+ @Nullable NetworkGraph net_graph,
BroadcasterInterface tx_broadcaster, Logger logger) {
- channel_monitors = new TwoTuple[0];
+ final IgnoringMessageHandler no_custom_messages = IgnoringMessageHandler.of();
+ channel_monitors = new TwoTuple_BlockHashChannelMonitorZ[0];
channel_manager_latest_block_hash = null;
this.chain_monitor = chain_monitor;
BestBlock block = BestBlock.of(current_blockchain_tip_hash, current_blockchain_tip_height);
channel_manager = ChannelManager.of(fee_estimator, chain_monitor.as_Watch(), tx_broadcaster, logger, keys_interface, config, params);
this.logger = logger;
byte[] random_data = keys_interface.get_secure_random_bytes();
- if (router != null) {
- this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ this.net_graph = net_graph;
+ if (net_graph != null) {
+ //TODO: We really need to expose the Access here to let users prevent DoS issues
+ this.graph_msg_handler = NetGraphMsgHandler.of(net_graph, Option_AccessZ.none(), logger);
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(),
+ graph_msg_handler.as_RoutingMessageHandler(),
+ keys_interface.get_node_secret(), random_data, logger, no_custom_messages.as_CustomMessageHandler());
} else {
- this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), (IgnoringMessageHandler.of()).as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ this.graph_msg_handler = null;
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), no_custom_messages.as_RoutingMessageHandler(),
+ keys_interface.get_node_secret(), random_data, logger, no_custom_messages.as_CustomMessageHandler());
}
NioPeerHandler nio_peer_handler = null;
try {
* Abstract interface which should handle Events and persist the ChannelManager. When you call chain_sync_completed
* a background thread is started which will automatically call these methods for you when events occur.
*/
- public interface ChannelManagerPersister {
+ public interface EventHandler {
+ /**
+ * Called when an Event occurs. Fired from the background thread started by
+ * chain_sync_completed, so implementations must be safe to call off the main thread.
+ */
void handle_event(Event events);
+ /**
+ * Called with the latest serialized ChannelManager bytes (from ChannelManager.write())
+ * whenever persistence is required; the implementor should write them to durable storage.
+ */
void persist_manager(byte[] channel_manager_bytes);
}
* ChannelManager are processed as normal.
*
* This also spawns a background thread which will call the appropriate methods on the provided
- * ChannelManagerPersister as required.
+ * EventHandler as required.
*/
- public void chain_sync_completed(ChannelManagerPersister persister) {
+ public void chain_sync_completed(EventHandler event_handler, @Nullable LockableScore scorer) {
if (background_processor != null) { return; }
- for (TwoTuple<ChannelMonitor, byte[]> monitor: channel_monitors) {
- this.chain_monitor.as_Watch().watch_channel(monitor.a.get_funding_txo().a, monitor.a);
+ for (TwoTuple_BlockHashChannelMonitorZ monitor: channel_monitors) {
+ this.chain_monitor.as_Watch().watch_channel(monitor.get_b().get_funding_txo().get_a(), monitor.get_b());
}
+ // Wrap the user's handler in an LDK EventHandler. If a NetworkGraph was provided at
+ // construction *and* the caller supplied a scorer, route all events through an
+ // InvoicePayer instead, which retries failed payments and feeds failure data to the scorer.
+ org.ldk.structs.EventHandler ldk_handler = org.ldk.structs.EventHandler.new_impl(event_handler::handle_event);
+ if (this.net_graph != null && scorer != null) {
+ //TODO: We really need to expose the Access here to let users prevent DoS issues
+ Router router = DefaultRouter.of(net_graph, logger).as_Router();
+ this.payer = InvoicePayer.of(this.channel_manager.as_Payer(), router, scorer, this.logger, ldk_handler, RetryAttempts.of(3));
+ // Sanity-check: InvoicePayer.of is not expected to return null here.
+ assert this.payer != null;
+ ldk_handler = this.payer.as_EventHandler();
+ }
+
background_processor = BackgroundProcessor.start(org.ldk.structs.ChannelManagerPersister.new_impl(channel_manager -> {
- persister.persist_manager(channel_manager.write());
+ event_handler.persist_manager(channel_manager.write());
return Result_NoneErrorZ.ok();
- }), EventHandler.new_impl(persister::handle_event),
- this.chain_monitor, this.channel_manager, this.peer_manager, this.logger);
+ }), ldk_handler, this.chain_monitor, this.channel_manager, this.graph_msg_handler, this.peer_manager, this.logger);
}
/**