import org.ldk.structs.*;
import org.ldk.util.TwoTuple;
+import java.io.IOException;
+
/**
* A simple utility class which assists in constructing a fresh or deserializing from disk a ChannelManager and one or
* more ChannelMonitors.
+ *
+ * Also constructs a PeerManager and spawns a background thread to monitor for and notify you of relevant Events.
*/
public class ChannelManagerConstructor {
/**
* and then continue to normal application operation.
*/
public final TwoTuple<ChannelMonitor, byte[]>[] channel_monitors;
+ /**
+ * A PeerManager which is constructed to pass messages and handle connections to peers.
+ */
+ public final PeerManager peer_manager;
+ /**
+ * A NioPeerHandler which manages a background thread to handle socket events and pass them to the peer_manager.
+ */
+ public final NioPeerHandler nio_peer_handler;
private final ChainMonitor chain_monitor;
+ private final Logger logger;
+
/**
* Deserializes a channel manager and a set of channel monitors from the given serialized copies and interface implementations
*
*/
public ChannelManagerConstructor(byte[] channel_manager_serialized, byte[][] channel_monitors_serialized,
KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor, @Nullable Filter filter,
+ @Nullable NetGraphMsgHandler router,
BroadcasterInterface tx_broadcaster, Logger logger) throws InvalidSerializedDataException {
final ChannelMonitor[] monitors = new ChannelMonitor[channel_monitors_serialized.length];
this.channel_monitors = new TwoTuple[monitors.length];
for (int i = 0; i < monitors.length; i++) {
- Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res = UtilMethods.constructor_BlockHashChannelMonitorZ_read(channel_monitors_serialized[i], keys_interface);
+ Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res = UtilMethods.BlockHashChannelMonitorZ_read(channel_monitors_serialized[i], keys_interface);
if (res instanceof Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_Err) {
throw new InvalidSerializedDataException();
}
this.channel_monitors[i] = new TwoTuple<>(monitors[i], ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK)res).res.a);
}
Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ res =
- UtilMethods.constructor_BlockHashChannelManagerZ_read(channel_manager_serialized, keys_interface, fee_estimator, chain_monitor.as_Watch(), tx_broadcaster,
- logger, UserConfig.constructor_default(), monitors);
+ UtilMethods.BlockHashChannelManagerZ_read(channel_manager_serialized, keys_interface, fee_estimator, chain_monitor.as_Watch(), tx_broadcaster,
+ logger, UserConfig.with_default(), monitors);
if (res instanceof Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_Err) {
throw new InvalidSerializedDataException();
}
this.channel_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.b;
this.channel_manager_latest_block_hash = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK)res).res.a;
this.chain_monitor = chain_monitor;
+ this.logger = logger;
+ byte[] random_data = keys_interface.get_secure_random_bytes();
+ if (router != null) {
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ } else {
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), (IgnoringMessageHandler.of()).as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ }
+ NioPeerHandler nio_peer_handler = null;
+ try { nio_peer_handler = new NioPeerHandler(this.peer_manager); } catch (IOException e) { assert false; }
+ this.nio_peer_handler = nio_peer_handler;
if (filter != null) {
for (ChannelMonitor monitor : monitors) {
monitor.load_outputs_to_watch(filter);
*/
public ChannelManagerConstructor(LDKNetwork network, UserConfig config, byte[] current_blockchain_tip_hash, int current_blockchain_tip_height,
KeysInterface keys_interface, FeeEstimator fee_estimator, ChainMonitor chain_monitor,
+ @Nullable NetGraphMsgHandler router,
BroadcasterInterface tx_broadcaster, Logger logger) throws InvalidSerializedDataException {
channel_monitors = new TwoTuple[0];
channel_manager_latest_block_hash = null;
this.chain_monitor = chain_monitor;
- BestBlock block = BestBlock.constructor_new(current_blockchain_tip_hash, current_blockchain_tip_height);
- channel_manager = ChannelManager.constructor_new(fee_estimator, chain_monitor.as_Watch(), tx_broadcaster, logger, keys_interface, config, network, block);
+ // Updated to the renamed static factories (constructor_new -> of); ChannelManager
+ // now takes the network and tip bundled into a ChainParameters struct.
+ BestBlock block = BestBlock.of(current_blockchain_tip_hash, current_blockchain_tip_height);
+ ChainParameters params = ChainParameters.of(network, block);
+ channel_manager = ChannelManager.of(fee_estimator, chain_monitor.as_Watch(), tx_broadcaster, logger, keys_interface, config, params);
+ this.logger = logger;
+ // Fresh random bytes seed the PeerManager's ephemeral per-connection key material.
+ byte[] random_data = keys_interface.get_secure_random_bytes();
+ if (router != null) {
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ } else {
+ // No router provided: gossip/routing messages are dropped via IgnoringMessageHandler.
+ this.peer_manager = PeerManager.of(channel_manager.as_ChannelMessageHandler(), (IgnoringMessageHandler.of()).as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ }
+ NioPeerHandler nio_peer_handler = null;
+ // NOTE(review): IOException is swallowed by an assert (a no-op without -ea), which
+ // would leave nio_peer_handler null — confirm this failure mode is acceptable.
+ try { nio_peer_handler = new NioPeerHandler(this.peer_manager); } catch (IOException e) { assert false; }
+ this.nio_peer_handler = nio_peer_handler;
}
/**
* a background thread is started which will automatically call these methods for you when events occur.
*/
public interface ChannelManagerPersister {
- void handle_events(Event[] events);
+ // API change: events are now delivered one at a time through an EventHandler
+ // rather than drained in batches from the EventsProvider.
+ // NOTE(review): the parameter holds a single Event — consider renaming 'events' to 'event'.
+ void handle_event(Event events);
+ // Invoked with the latest ChannelManager serialization whenever it must be written to disk.
void persist_manager(byte[] channel_manager_bytes);
}
- Thread persister_thread = null;
- volatile boolean shutdown = false;
+ BackgroundProcessor background_processor = null;
/**
* Utility which adds all of the deserialized ChannelMonitors to the chain watch so that further updates from the
* ChannelManagerPersister as required.
*/
public void chain_sync_completed(ChannelManagerPersister persister) {
+ // Idempotent: once the background processor is running, further calls are no-ops.
- if (persister_thread != null) { return; }
+ if (background_processor != null) { return; }
+ // Register each deserialized monitor with the chain watch, keyed by its funding outpoint.
for (TwoTuple<ChannelMonitor, byte[]> monitor: channel_monitors) {
this.chain_monitor.as_Watch().watch_channel(monitor.a.get_funding_txo().a, monitor.a);
}
- persister_thread = new Thread(() -> {
- long lastTimerTick = System.currentTimeMillis();
- while (true) {
- boolean need_persist = this.channel_manager.await_persistable_update_timeout(1);
- Event[] events = this.channel_manager.as_EventsProvider().get_and_clear_pending_events();
- if (events.length != 0) {
- persister.handle_events(events);
- need_persist = true;
- }
- events = this.chain_monitor.as_EventsProvider().get_and_clear_pending_events();
-
- if (events.length != 0) {
- persister.handle_events(events);
- need_persist = true;
- }
- if (need_persist) {
- persister.persist_manager(this.channel_manager.write());
- }
- if (shutdown) {
- return;
- }
- if (lastTimerTick < System.currentTimeMillis() - 60 * 1000) {
- this.channel_manager.timer_tick_occurred();
- lastTimerTick = System.currentTimeMillis();
- }
- }
- }, "NioPeerHandler NIO Thread");
- persister_thread.start();
+ // The hand-rolled polling/persist/timer thread above is replaced by LDK's
+ // BackgroundProcessor, which handles event dispatch, manager persistence,
+ // and periodic timer ticks internally.
+ background_processor = BackgroundProcessor.start(org.ldk.structs.ChannelManagerPersister.new_impl(channel_manager -> {
+ persister.persist_manager(channel_manager.write());
+ return Result_NoneErrorZ.ok();
+ }), EventHandler.new_impl(persister::handle_event),
+ this.chain_monitor, this.channel_manager, this.peer_manager, this.logger);
}
/**
- * Interrupt the background thread, stopping the background handling of
+ * Interrupt the background thread, stopping the background handling of events.
*/
public void interrupt() {
- shutdown = true;
- try {
- persister_thread.join();
- } catch (InterruptedException ignored) { }
+ // Stop the background processor, then shut down the socket-handling NIO thread.
+ // NOTE(review): background_processor is only assigned in chain_sync_completed();
+ // calling interrupt() before it throws a NullPointerException — confirm the
+ // intended call ordering or add a null guard.
+ this.background_processor.stop();
+ this.nio_peer_handler.interrupt();
}
}
}
private void bind_nio() {
if (!use_nio_peer_handler) return;
- try { this.nio_peer_handler = new NioPeerHandler(peer_manager); } catch (IOException e) { assert false; }
+ if (use_chan_manager_constructor) {
+ this.nio_peer_handler = this.constructor.nio_peer_handler;
+ } else {
+ try { this.nio_peer_handler = new NioPeerHandler(peer_manager); } catch (IOException e) { assert false; }
+ }
for (short i = 10_000; true; i++) {
try {
nio_peer_handler.bind_listener(new InetSocketAddress("127.0.0.1", i));
if (use_chan_manager_constructor) {
try {
this.constructor = new ChannelManagerConstructor(LDKNetwork.LDKNetwork_Bitcoin, UserConfig.with_default(), new byte[32], 0,
- this.keys_interface, this.fee_estimator, this.chain_monitor, this.tx_broadcaster, this.logger);
+ this.keys_interface, this.fee_estimator, this.chain_monitor, this.router, this.tx_broadcaster, this.logger);
constructor.chain_sync_completed(new ChannelManagerConstructor.ChannelManagerPersister() {
- @Override public void handle_events(Event[] events) {
+ @Override public void handle_event(Event event) {
synchronized (pending_manager_events) {
- pending_manager_events.addAll(Arrays.asList(events));
+ pending_manager_events.add(event);
pending_manager_events.notifyAll();
}
}
@Override public void persist_manager(byte[] channel_manager_bytes) { }
});
this.chan_manager = constructor.channel_manager;
+ this.peer_manager = constructor.peer_manager;
must_free_objs.add(new WeakReference<>(this.chan_manager));
} catch (ChannelManagerConstructor.InvalidSerializedDataException e) {
assert false;
}
} else {
- this.chan_manager = ChannelManager.of(this.fee_estimator, chain_watch, tx_broadcaster, logger, this.keys_interface, UserConfig.with_default(), LDKNetwork.LDKNetwork_Bitcoin, BestBlock.of(new byte[32], 0));
+ ChainParameters params = ChainParameters.of(LDKNetwork.LDKNetwork_Bitcoin, BestBlock.of(new byte[32], 0));
+ this.chan_manager = ChannelManager.of(this.fee_estimator, chain_watch, tx_broadcaster, logger, this.keys_interface, UserConfig.with_default(), params);
+ byte[] random_data = keys_interface.get_secure_random_bytes();
+ this.peer_manager = PeerManager.of(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
}
this.node_id = chan_manager.get_our_node_id();
-
- byte[] random_data = new byte[32];
- for (byte i = 0; i < 32; i++) {
- random_data[i] = (byte) ((i ^ seed) ^ 0xf0);
- }
- this.peer_manager = PeerManager.of(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
bind_nio();
System.gc();
}
- byte[] hexStringToByteArray(String s) {
- int len = s.length();
- byte[] data = new byte[len / 2];
- for (int i = 0; i < len; i += 2) {
- data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4)
- + Character.digit(s.charAt(i+1), 16));
- }
- return data;
- }
-
-
Object ptr_to;
Peer(Peer orig) {
this(null, orig.seed);
byte[] serialized = orig.chan_manager.write();
try {
this.constructor = new ChannelManagerConstructor(serialized, monitors, this.keys_interface,
- this.fee_estimator, this.chain_monitor, this.filter, this.tx_broadcaster, this.logger);
+ this.fee_estimator, this.chain_monitor, this.filter, this.router, this.tx_broadcaster, this.logger);
constructor.chain_sync_completed(new ChannelManagerConstructor.ChannelManagerPersister() {
- @Override public void handle_events(Event[] events) {
+ @Override public void handle_event(Event event) {
synchronized (pending_manager_events) {
- pending_manager_events.addAll(Arrays.asList(events));
+ pending_manager_events.add(event);
pending_manager_events.notifyAll();
}
}
@Override public void persist_manager(byte[] channel_manager_bytes) { }
});
this.chan_manager = constructor.channel_manager;
+ this.peer_manager = constructor.peer_manager;
must_free_objs.add(new WeakReference<>(this.chan_manager));
// If we are using a ChannelManagerConstructor, we may have pending events waiting on the old peer
// which have been removed from the ChannelManager but which we still need to handle.
assert read_res instanceof Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK;
this.chan_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK) read_res).res.b;
this.chain_watch.watch_channel(monitors[0].get_funding_txo().a, monitors[0]);
+ byte[] random_data = keys_interface.get_secure_random_bytes();
+ this.peer_manager = PeerManager.of(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
if (!break_cross_peer_refs && (use_manual_watch || use_km_wrapper)) {
// When we pass monitors[0] into chain_watch.watch_channel we create a reference from the new Peer to a
// field in the old peer, preventing freeing of the original Peer until the new Peer is freed. Thus, we
}
}
this.node_id = chan_manager.get_our_node_id();
+ bind_nio();
if (cross_reload_ref_pollution) {
// This really, really needs to be handled at the bindings layer, but its rather complicated -
// the ChannelSigner.
this.ptr_to = orig.chan_manager;
}
-
- byte[] random_data = new byte[32];
- for (byte i = 0; i < 32; i++) {
- random_data[i] = (byte) ((i ^ seed) ^ 0xf0);
- }
- this.peer_manager = PeerManager.of(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
- bind_nio();
}
TwoTuple<byte[], TwoTuple<Integer, TxOut>[]>[] connect_block(Block b, int height, long expected_monitor_update_len) {
return res;
}
} else if (chain_monitor != null) {
- return chain_monitor.as_EventsProvider().get_and_clear_pending_events();
+ ArrayList<Event> l = new ArrayList<Event>();
+ chain_monitor.as_EventsProvider().process_pending_events(EventHandler.new_impl(l::add));
+ assert l.size() == expected_len;
+ return l.toArray(new Event[0]);
} else {
synchronized (monitors) {
assert monitors.size() == 1;
}
}
} else {
- res = this.chan_manager.as_EventsProvider().get_and_clear_pending_events();
+ ArrayList<Event> l = new ArrayList<Event>();
+ chan_manager.as_EventsProvider().process_pending_events(EventHandler.new_impl(l::add));
+ return l.toArray(new Event[0]);
}
assert res.length == expected_len;
return res;
state.peer1.constructor.interrupt();
state.peer2.constructor.interrupt();
}
+
+ t.interrupt();
}
java.util.LinkedList<WeakReference<Object>> must_free_objs = new java.util.LinkedList();
// There are no cross refs to break without reloading peers.
continue;
}
- if (use_chan_manager_constructor && use_manual_watch) {
- // ChannelManagerConstructor requires a ChainMonitor as the Watch
+ if (use_chan_manager_constructor && (use_manual_watch || !nio_peer_handler)) {
+ // ChannelManagerConstructor requires a ChainMonitor as the Watch and creates a NioPeerHandler for us.
continue;
}
System.err.println("Running test with flags " + i);
bindings.PeerManager_process_events(peer2.peer_manager);
while (!list.isEmpty()) { list.poll().join(); }
- long events[] = bindings.EventsProvider_get_and_clear_pending_events(peer1.chan_manager_events);
- assert events.length == 1;
- bindings.LDKEvent event = bindings.LDKEvent_ref_from_ptr(events[0]);
+ ArrayList<Long> events = new ArrayList();
+ long handler = bindings.LDKEventHandler_new(events::add);
+
+ bindings.EventsProvider_process_pending_events(peer1.chan_manager_events, handler);
+ assert events.size() == 1;
+ bindings.LDKEvent event = bindings.LDKEvent_ref_from_ptr(events.get(0));
assert event instanceof bindings.LDKEvent.FundingGenerationReady;
assert ((bindings.LDKEvent.FundingGenerationReady)event).channel_value_satoshis == 10000;
assert ((bindings.LDKEvent.FundingGenerationReady)event).user_channel_id == 42;
byte[] funding_spk = ((bindings.LDKEvent.FundingGenerationReady)event).output_script;
assert funding_spk.length == 34 && funding_spk[0] == 0 && funding_spk[1] == 32; // P2WSH
byte[] chan_id = ((bindings.LDKEvent.FundingGenerationReady)event).temporary_channel_id;
- bindings.CVec_EventZ_free(events);
+ bindings.Event_free(events.remove(0));
Transaction funding = new Transaction(NetworkParameters.fromID(NetworkParameters.ID_MAINNET));
funding.addInput(new TransactionInput(NetworkParameters.fromID(NetworkParameters.ID_MAINNET), funding, new byte[0]));
bindings.PeerManager_process_events(peer1.peer_manager);
while (!list.isEmpty()) { list.poll().join(); }
- long[] peer2_events = bindings.EventsProvider_get_and_clear_pending_events(peer2.chan_manager_events);
- assert peer2_events.length == 1;
- bindings.LDKEvent forwardable = bindings.LDKEvent_ref_from_ptr(peer2_events[0]);
+ bindings.EventsProvider_process_pending_events(peer2.chan_manager_events, handler);
+ assert events.size() == 1;
+ bindings.LDKEvent forwardable = bindings.LDKEvent_ref_from_ptr(events.get(0));
assert forwardable instanceof bindings.LDKEvent.PendingHTLCsForwardable;
- bindings.CVec_EventZ_free(peer2_events);
+ bindings.Event_free(events.remove(0));
bindings.ChannelManager_process_pending_htlc_forwards(peer2.chan_manager);
- peer2_events = bindings.EventsProvider_get_and_clear_pending_events(peer2.chan_manager_events);
- assert peer2_events.length == 1;
- bindings.LDKEvent payment_recvd = bindings.LDKEvent_ref_from_ptr(peer2_events[0]);
+ bindings.EventsProvider_process_pending_events(peer2.chan_manager_events, handler);
+ assert events.size() == 1;
+ bindings.LDKEvent payment_recvd = bindings.LDKEvent_ref_from_ptr(events.get(0));
assert payment_recvd instanceof bindings.LDKEvent.PaymentReceived;
assert bindings.ChannelManager_claim_funds(peer2.chan_manager, ((bindings.LDKEvent.PaymentReceived) payment_recvd).payment_preimage);
- bindings.CVec_EventZ_free(peer2_events);
+ bindings.Event_free(events.remove(0));
bindings.PeerManager_process_events(peer2.peer_manager);
while (!list.isEmpty()) { list.poll().join(); }
bindings.PeerManager_process_events(peer1.peer_manager);
while (!list.isEmpty()) { list.poll().join(); }
- long[] peer1_events = bindings.EventsProvider_get_and_clear_pending_events(peer1.chan_manager_events);
- assert peer1_events.length == 1;
- bindings.LDKEvent sent = bindings.LDKEvent_ref_from_ptr(peer1_events[0]);
+ bindings.EventsProvider_process_pending_events(peer1.chan_manager_events, handler);
+ assert events.size() == 1;
+ bindings.LDKEvent sent = bindings.LDKEvent_ref_from_ptr(events.get(0));
assert sent instanceof bindings.LDKEvent.PaymentSent;
- bindings.CVec_EventZ_free(peer1_events);
+ bindings.Event_free(events.remove(0));
+
+ bindings.EventHandler_free(handler);
peer1.free();
peer2.free();