import org.bitcoinj.core.Transaction;
import org.bitcoinj.script.Script;
import org.junit.jupiter.api.Test;
+import org.ldk.batteries.NioPeerHandler;
import org.ldk.enums.LDKNetwork;
import org.ldk.impl.bindings;
import org.ldk.structs.*;
+import org.ldk.util.TwoTuple;
+import java.io.IOException;
import java.lang.ref.WeakReference;
+import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.HashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.LinkedList;
+
+class HumanObjectPeerTestInstance {
+ // Test-matrix flags: each boolean selects one axis of the end-to-end peer test.
+ // nice_close: shut channels down cooperatively at the end rather than dropping them.
+ private final boolean nice_close;
+ // use_km_wrapper: wrap the KeysManager-provided KeysInterface in a Java-side pass-through
+ // implementation (exercises the Java trait-impl glue).
+ private final boolean use_km_wrapper;
+ // use_manual_watch: use a hand-rolled Watch implementation instead of ChainMonitor.
+ private final boolean use_manual_watch;
+ // reload_peers: serialize + deserialize both peers mid-test, simulating a restart.
+ private final boolean reload_peers;
+ // break_cross_peer_refs: round-trip the ChannelMonitor through serialization on reload so
+ // the new Peer holds no references into the old one.
+ private final boolean break_cross_peer_refs;
+ // use_nio_peer_handler: drive the peers over real TCP sockets via NioPeerHandler instead of
+ // the in-process SocketDescriptor shims.
+ private final boolean use_nio_peer_handler;
+
+ HumanObjectPeerTestInstance(boolean nice_close, boolean use_km_wrapper, boolean use_manual_watch, boolean reload_peers, boolean break_cross_peer_refs, boolean use_nio_peer_handler) {
+ this.nice_close = nice_close;
+ this.use_km_wrapper = use_km_wrapper;
+ this.use_manual_watch = use_manual_watch;
+ this.reload_peers = reload_peers;
+ this.break_cross_peer_refs = break_cross_peer_refs;
+ this.use_nio_peer_handler = use_nio_peer_handler;
+ }
-public class HumanObjectPeerTest {
class Peer {
- final long logger;
- final long fee_estimator;
- final long tx_broadcaster;
- final KeysManager keys;
- final KeysInterface keys_interface;
- final ChannelManager chan_manager;
- final EventsProvider chan_manager_events;
- final NetGraphMsgHandler router;
- final PeerManager peer_manager;
- HashMap<String, Long> monitors; // Wow I forgot just how terrible Java is - we can't put a byte array here.
- byte[] node_id;
+ // Wraps an existing KeysInterface in a Java-implemented pass-through, so that every keys call
+ // crosses the Java trait-impl boundary. Each wrapper object is registered in must_free_objs so
+ // the test can later assert it was garbage-collected (i.e. no native reference leaked).
+ KeysInterface manual_keysif(KeysInterface underlying_if) {
+ return KeysInterface.new_impl(new KeysInterface.KeysInterfaceInterface() {
+ @Override public byte[] get_node_secret() { return underlying_if.get_node_secret(); }
+ @Override public byte[] get_destination_script() { return underlying_if.get_destination_script(); }
+ @Override public byte[] get_shutdown_pubkey() { return underlying_if.get_shutdown_pubkey(); }
+
+ @Override
+ public ChannelKeys get_channel_keys(boolean inbound, long channel_value_satoshis) {
+ ChannelKeys underlying_ck = underlying_if.get_channel_keys(inbound, channel_value_satoshis);
+ ChannelKeys.ChannelKeysInterface cki = new ChannelKeys.ChannelKeysInterface() {
+ @Override
+ public byte[] get_per_commitment_point(long idx) {
+ return underlying_ck.get_per_commitment_point(idx);
+ }
+
+ @Override
+ public byte[] release_commitment_secret(long idx) {
+ return underlying_ck.release_commitment_secret(idx);
+ }
+
+ @Override
+ public TwoTuple<Long, Long> key_derivation_params() {
+ // Deliberately NOT delegated: returns a fixed (0, 1) rather than the underlying
+ // signer's params, exercising a Java-constructed tuple return.
+ return new TwoTuple<Long, Long>((long)0, (long)1);
+ }
+
+ @Override
+ public Result_C2Tuple_SignatureCVec_SignatureZZNoneZ sign_counterparty_commitment(CommitmentTransaction commitment_tx) {
+ return underlying_ck.sign_counterparty_commitment(commitment_tx);
+ }
+
+ @Override
+ public Result_C2Tuple_SignatureCVec_SignatureZZNoneZ sign_holder_commitment_and_htlcs(HolderCommitmentTransaction holder_commitment_tx) {
+ return underlying_ck.sign_holder_commitment_and_htlcs(holder_commitment_tx);
+ }
+
+ @Override
+ public Result_SignatureNoneZ sign_justice_transaction(byte[] justice_tx, long input, long amount, byte[] per_commitment_key, HTLCOutputInCommitment htlc) {
+ return underlying_ck.sign_justice_transaction(justice_tx, input, amount, per_commitment_key, htlc);
+ }
+
+ @Override
+ public Result_SignatureNoneZ sign_counterparty_htlc_transaction(byte[] htlc_tx, long input, long amount, byte[] per_commitment_point, HTLCOutputInCommitment htlc) {
+ return underlying_ck.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc);
+ }
+
+ @Override
+ public Result_SignatureNoneZ sign_closing_transaction(byte[] closing_tx) {
+ return underlying_ck.sign_closing_transaction(closing_tx);
+ }
+
+ @Override
+ public Result_SignatureNoneZ sign_channel_announcement(UnsignedChannelAnnouncement msg) {
+ return underlying_ck.sign_channel_announcement(msg);
+ }
+
+ @Override
+ public void ready_channel(ChannelTransactionParameters params) {
+ underlying_ck.ready_channel(params);
+ }
+
+ @Override
+ public byte[] write() {
+ return underlying_ck.write();
+ }
+ };
+ ChannelKeys resp = ChannelKeys.new_impl(cki, underlying_ck.get_pubkeys());
+ // Track all three layers so the GC-leak check can verify they get collected.
+ must_free_objs.add(new WeakReference<>(cki));
+ must_free_objs.add(new WeakReference<>(resp));
+ must_free_objs.add(new WeakReference<>(underlying_ck));
+ return resp;
+ }
- Peer(byte seed) {
- bindings.LDKLogger log_trait = (String arg) -> System.out.println(seed + ": " + arg);
- logger = bindings.LDKLogger_new(log_trait);
- this.fee_estimator = bindings.LDKFeeEstimator_new(confirmation_target -> 0);
- this.tx_broadcaster = bindings.LDKBroadcasterInterface_new(tx -> {
- // We should broadcast
- });
- this.monitors = new HashMap<>();
- Watch chain_monitor = new Watch(new bindings.LDKWatch() {
@Override
- public long watch_channel(long funding_txo, long monitor) {
+ public byte[] get_secure_random_bytes() {
+ return underlying_if.get_secure_random_bytes();
+ }
+
+ @Override
+ public Result_ChanKeySignerDecodeErrorZ read_chan_signer(byte[] reader) {
+ return underlying_if.read_chan_signer(reader);
+ }
+ });
+ }
+
+ // Builds a hand-rolled Watch that stores ChannelMonitors in the `monitors` map, used when
+ // use_manual_watch is set (instead of the library-provided ChainMonitor). Both the interface
+ // impl and the wrapper are tracked for the later GC-leak assertions.
+ Watch get_manual_watch() {
+ Watch.WatchInterface watch_impl = new Watch.WatchInterface() {
+ public Result_NoneChannelMonitorUpdateErrZ watch_channel(OutPoint funding_txo, ChannelMonitor monitor) {
synchronized (monitors) {
- assert monitors.put(Arrays.toString(bindings.OutPoint_get_txid(funding_txo)), monitor) == null;
+ assert monitors.put(Arrays.toString(funding_txo.get_txid()), monitor) == null;
}
- bindings.OutPoint_free(funding_txo);
- return bindings.CResult_NoneChannelMonitorUpdateErrZ_ok();
+ return new Result_NoneChannelMonitorUpdateErrZ.Result_NoneChannelMonitorUpdateErrZ_OK();
}
- @Override
- public long update_channel(long funding_txo, long update) {
+ public Result_NoneChannelMonitorUpdateErrZ update_channel(OutPoint funding_txo, ChannelMonitorUpdate update) {
synchronized (monitors) {
- String txid = Arrays.toString(bindings.OutPoint_get_txid(funding_txo));
+ String txid = Arrays.toString(funding_txo.get_txid());
assert monitors.containsKey(txid);
- long update_res = bindings.ChannelMonitor_update_monitor(monitors.get(txid), update, tx_broadcaster, logger);
- assert bindings.LDKCResult_NoneMonitorUpdateErrorZ_result_ok(update_res);
- bindings.CResult_NoneMonitorUpdateErrorZ_free(update_res);
+ Result_NoneMonitorUpdateErrorZ update_res = monitors.get(txid).update_monitor(update, tx_broadcaster, fee_estimator, logger);
+ assert update_res instanceof Result_NoneMonitorUpdateErrorZ.Result_NoneMonitorUpdateErrorZ_OK;
}
- bindings.OutPoint_free(funding_txo);
- bindings.ChannelMonitorUpdate_free(update);
- return bindings.CResult_NoneChannelMonitorUpdateErrZ_ok();
+ return new Result_NoneChannelMonitorUpdateErrZ.Result_NoneChannelMonitorUpdateErrZ_OK();
}
@Override
- public long[] release_pending_monitor_events() {
+ public MonitorEvent[] release_pending_monitor_events() {
synchronized (monitors) {
assert monitors.size() <= 1;
- for (Long mon : monitors.values()) {
- return bindings.ChannelMonitor_get_and_clear_pending_monitor_events(mon);
+ for (ChannelMonitor mon : monitors.values()) {
+ return mon.get_and_clear_pending_monitor_events();
}
}
- return new long[0];
+ return new MonitorEvent[0];
+ }
+ };
+ Watch watch = Watch.new_impl(watch_impl);
+ must_free_objs.add(new WeakReference<>(watch_impl));
+ must_free_objs.add(new WeakReference<>(watch));
+ return watch;
+ }
+
+ // Only set when use_nio_peer_handler: TCP-socket driver and the local port it listens on.
+ NioPeerHandler nio_peer_handler;
+ short nio_port;
+ final byte seed;
+ final Logger logger;
+ final FeeEstimator fee_estimator;
+ final BroadcasterInterface tx_broadcaster;
+ final KeysInterface keys_interface;
+ // chain_monitor is null when use_manual_watch is set; chain_watch is then the manual Watch.
+ final ChainMonitor chain_monitor;
+ final NetGraphMsgHandler router;
+ final Watch chain_watch;
+ ChannelManager chan_manager;
+ EventsProvider chan_manager_events;
+ PeerManager peer_manager;
+ final HashMap<String, ChannelMonitor> monitors; // Wow I forgot just how terrible Java is - we can't put a byte array here.
+ byte[] node_id;
+ // Transactions handed to our BroadcasterInterface, in broadcast order.
+ final LinkedList<byte[]> broadcast_set = new LinkedList<>();
+
+ // Serializes and re-reads a ChannelMonitor, returning the deserialized copy's funding outpoint.
+ private TwoTuple<OutPoint, byte[]> test_mon_roundtrip(ChannelMonitor mon) {
+ // Because get_funding_txo() returns an OutPoint in a tuple that is a reference to an OutPoint inside the
+ // ChannelMonitor, it's a good test to ensure that the OutPoint isn't freed (or is cloned) before the
+ // ChannelMonitor is. This used to be broken.
+ Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ roundtrip_monitor = UtilMethods.constructor_BlockHashChannelMonitorZ_read(mon.write(), keys_interface);
+ assert roundtrip_monitor instanceof Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK;
+ TwoTuple<OutPoint, byte[]> funding_txo = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) roundtrip_monitor).res.b.get_funding_txo();
+ System.gc(); System.runFinalization(); // Give the GC a chance to run.
+ return funding_txo;
+ }
+
+ // Common construction shared by the fresh-peer and reload-peer constructors: sets up the
+ // logger/fee-estimator/broadcaster traits, the monitor persister, the chain watch, and the
+ // keys interface. The _dummy parameter only disambiguates this overload.
+ private Peer(Object _dummy, byte seed) {
+ logger = Logger.new_impl((String arg) -> System.out.println(seed + ": " + arg));
+ // 253 sat per 1000-weight — presumably the minimum relay feerate floor; TODO confirm units.
+ fee_estimator = FeeEstimator.new_impl((confirmation_target -> 253));
+ tx_broadcaster = BroadcasterInterface.new_impl(tx -> {
+ broadcast_set.add(tx);
+ });
+ monitors = new HashMap<>();
+ this.seed = seed;
+ Persist persister = Persist.new_impl(new Persist.PersistInterface() {
+ @Override
+ public Result_NoneChannelMonitorUpdateErrZ persist_new_channel(OutPoint id, ChannelMonitor data) {
+ synchronized (monitors) {
+ String key = Arrays.toString(id.to_channel_id());
+ // First persist for this channel: the map must not already contain it.
+ assert monitors.put(key, data) == null;
+ TwoTuple<OutPoint, byte[]> res = test_mon_roundtrip(data);
+ assert Arrays.equals(res.a.get_txid(), id.get_txid());
+ assert res.a.get_index() == id.get_index();
+ }
+ return new Result_NoneChannelMonitorUpdateErrZ.Result_NoneChannelMonitorUpdateErrZ_OK();
+ }
+
+ @Override
+ public Result_NoneChannelMonitorUpdateErrZ update_persisted_channel(OutPoint id, ChannelMonitorUpdate update, ChannelMonitor data) {
+ synchronized (monitors) {
+ String key = Arrays.toString(id.to_channel_id());
+ // Update persist: the channel must already be present.
+ assert monitors.put(key, data) != null;
+ TwoTuple<OutPoint, byte[]> res = test_mon_roundtrip(data);
+ assert Arrays.equals(res.a.get_txid(), id.get_txid());
+ assert res.a.get_index() == id.get_index();
+ }
+ return new Result_NoneChannelMonitorUpdateErrZ.Result_NoneChannelMonitorUpdateErrZ_OK();
}
});
+ if (use_manual_watch) {
+ chain_watch = get_manual_watch();
+ chain_monitor = null;
+ } else {
+ chain_monitor = ChainMonitor.constructor_new(null, tx_broadcaster, logger, fee_estimator, persister);
+ chain_watch = chain_monitor.as_Watch();
+ }
byte[] key_seed = new byte[32];
for (byte i = 0; i < 32; i++) {
key_seed[i] = (byte) (i ^ seed);
}
- this.keys = KeysManager.constructor_new(key_seed, LDKNetwork.LDKNetwork_Bitcoin, System.currentTimeMillis() / 1000, (int) (System.currentTimeMillis() * 1000) & 0xffffffff);
- this.keys_interface = keys.as_KeysInterface();
- this.chan_manager = ChannelManager.constructor_new(LDKNetwork.LDKNetwork_Bitcoin, new FeeEstimator(confirmation_target -> 0), chain_monitor,
- new BroadcasterInterface(tx -> {
- }), new Logger(log_trait), keys.as_KeysInterface(), UserConfig.constructor_default(), 1);
+ KeysManager keys = KeysManager.constructor_new(key_seed, LDKNetwork.LDKNetwork_Bitcoin, System.currentTimeMillis() / 1000, (int) (System.currentTimeMillis() * 1000));
+ if (use_km_wrapper) {
+ this.keys_interface = manual_keysif(keys.as_KeysInterface());
+ } else {
+ this.keys_interface = keys.as_KeysInterface();
+ }
+ this.router = NetGraphMsgHandler.constructor_new(new byte[32], null, logger);
+ }
+ // Starts a NioPeerHandler (when use_nio_peer_handler is set) and binds it to the first free
+ // localhost port in [10000, 10500); the found port is recorded in nio_port.
+ private void bind_nio() {
+ if (!use_nio_peer_handler) return;
+ try { this.nio_peer_handler = new NioPeerHandler(peer_manager); } catch (IOException e) { assert false; }
+ for (short i = 10_000; true; i++) {
+ try {
+ nio_peer_handler.bind_listener(new InetSocketAddress("127.0.0.1", i));
+ nio_port = i;
+ break;
+ } catch (IOException e) { assert i < 10_500; } // give up (fail) after 500 attempts
+ }
+ }
+ // Fresh-peer constructor: builds a brand-new ChannelManager on top of the shared setup done
+ // by Peer(Object, byte).
+ Peer(byte seed) {
+ this(null, seed);
+ // Reuse the fee estimator built in the shared constructor rather than creating a second,
+ // inconsistent one (the reload path feeds this.fee_estimator into the deserialized
+ // ChannelManager, so the fresh path should match).
+ this.chan_manager = ChannelManager.constructor_new(LDKNetwork.LDKNetwork_Bitcoin, this.fee_estimator, chain_watch, tx_broadcaster, logger, this.keys_interface, UserConfig.constructor_default(), 1);
+ this.node_id = chan_manager.get_our_node_id();
+ this.chan_manager_events = chan_manager.as_EventsProvider();
- this.router = NetGraphMsgHandler.constructor_new(null, new Logger(log_trait));
+ byte[] random_data = new byte[32];
+ for (byte i = 0; i < 32; i++) {
+ random_data[i] = (byte) ((i ^ seed) ^ 0xf0);
+ }
- this.peer_manager = PeerManager.constructor_new(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, new Logger(log_trait));
+ this.peer_manager = PeerManager.constructor_new(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ bind_nio();
+ System.gc();
+ }
-
- void connect_block(Block b, Transaction t, int height) {
- byte[] header = Arrays.copyOfRange(b.bitcoinSerialize(), 0, 80);
- long[] txn;
- if (t != null)
- txn = new long[]{bindings.C2Tuple_usizeTransactionZ_new(1, bindings.new_txpointer_copy_data(t.bitcoinSerialize()))};
- else
- txn = new long[0];
- bindings.ChannelManager_block_connected(chan_manager._test_only_get_ptr(), header, txn, height);
+ Object ptr_to;
+ // Reload constructor: rebuilds this peer by serializing orig's ChannelManager (and, when
+ // break_cross_peer_refs is set, its ChannelMonitor too) and reading them back, simulating a
+ // node restart.
+ Peer(Peer orig) {
+ this(null, orig.seed);
+ ChannelMonitor[] monitors = new ChannelMonitor[1];
- synchronized (monitors) {
- for (Long mon : monitors.values()) {
- if (t != null)
- txn = new long[]{bindings.C2Tuple_usizeTransactionZ_new(1, bindings.new_txpointer_copy_data(t.bitcoinSerialize()))};
- else
- txn = new long[0];
- long[] ret = bindings.ChannelMonitor_block_connected(mon, header, txn, height, tx_broadcaster, fee_estimator, logger);
- for (long r : ret) bindings.C2Tuple_TxidCVec_TxOutZZ_free(r);
+ // Lock orig.monitors explicitly: the local ChannelMonitor[] shadows the `monitors` map
+ // field, so `synchronized (monitors)` would lock the freshly-allocated array and guard
+ // nothing while we read the map that orig's watch/persist callbacks mutate.
+ synchronized (orig.monitors) {
+ assert orig.monitors.size() == 1;
+ monitors[0] = orig.monitors.values().stream().iterator().next();
+ if (break_cross_peer_refs) {
+ Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res = UtilMethods.constructor_BlockHashChannelMonitorZ_read(monitors[0].write(), keys_interface);
+ assert res instanceof Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK;
+ monitors[0] = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) res).res.b;
}
}
+ byte[] serialized = orig.chan_manager.write();
+ Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ read_res =
+ UtilMethods.constructor_BlockHashChannelManagerZ_read(serialized, this.keys_interface, this.fee_estimator, this.chain_watch, this.tx_broadcaster, this.logger, UserConfig.constructor_default(), monitors);
+ if (!break_cross_peer_refs && (use_manual_watch || use_km_wrapper)) {
+ // When we pass monitors[0] into chain_watch.watch_channel we create a reference from the new Peer to a
+ // field in the old peer, preventing freeing of the original Peer until the new Peer is freed. Thus, we
+ // shouldn't bother waiting for the original to be freed later on.
+ cross_reload_ref_pollution = true;
+ }
+ this.chain_watch.watch_channel(monitors[0].get_funding_txo().a, monitors[0]);
+ assert read_res instanceof Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK;
+ this.chan_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK) read_res).res.b;
+ this.node_id = chan_manager.get_our_node_id();
+ this.chan_manager_events = chan_manager.as_EventsProvider();
+
+ if (cross_reload_ref_pollution) {
+ // This really, really needs to be handled at the bindings layer, but it's rather complicated -
+ // ChannelSigners can be cloned and passed around without java being involved, resulting in them being
+ // owned by both one or more ChannelMonitors and a ChannelManager, with only one having proper pointers
+ // to the ChannelSigner. Ideally, the ChannelSigner would have a global reference to the Java
+ // implementation class, but that results in circular references. Instead, we need some ability to,
+ // while cloning ChannelSigners, add new references in the calling Java struct (ie ChannelMonitor) to
+ // the ChannelSigner.
+ this.ptr_to = orig.chan_manager;
+ }
+
+ byte[] random_data = new byte[32];
+ for (byte i = 0; i < 32; i++) {
+ random_data[i] = (byte) ((i ^ seed) ^ 0xf0);
+ }
+ this.peer_manager = PeerManager.constructor_new(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+ bind_nio();
+ }
- void free() {
- // Note that we can't rely on finalizer order, so don't bother trying to rely on it here
- bindings.Logger_free(logger);
- bindings.FeeEstimator_free(fee_estimator);
- bindings.BroadcasterInterface_free(tx_broadcaster);
- synchronized (monitors) {
- for (Long mon : monitors.values()) {
- bindings.ChannelMonitor_free(mon);
+ // Feeds block b (at the given height) to the ChannelManager and to either the ChainMonitor or,
+ // in manual-watch mode, directly to the single stored ChannelMonitor. In the manual case the
+ // monitor's (txid, outputs) result is returned and its length asserted; otherwise returns null.
+ TwoTuple<byte[], TwoTuple<Integer, TxOut>[]>[] connect_block(Block b, int height, long expected_monitor_update_len) {
+ byte[] header = Arrays.copyOfRange(b.bitcoinSerialize(), 0, 80); // 80-byte block header
+ TwoTuple<Long, byte[]>[] txn;
+ if (b.hasTransactions()) {
+ assert b.getTransactions().size() == 1;
+ // (position-in-block, serialized tx); position 1 as 0 is the coinbase.
+ TwoTuple<Long, byte[]> txp = new TwoTuple<>((long) 1, b.getTransactions().get(0).bitcoinSerialize());
+ txn = new TwoTuple[]{txp};
+ } else
+ txn = new TwoTuple[0];
+ chan_manager.block_connected(header, txn, height);
+ if (chain_monitor != null) {
+ chain_monitor.block_connected(header, txn, height);
+ } else {
+ synchronized (monitors) {
+ assert monitors.size() == 1;
+ for (ChannelMonitor mon : monitors.values()) {
+ TwoTuple<byte[], TwoTuple<Integer, TxOut>[]>[] ret = mon.block_connected(header, txn, height, tx_broadcaster, fee_estimator, logger);
+ assert ret.length == expected_monitor_update_len;
+ return ret;
+ }
+ }
}
+ return null;
}
Route get_route(byte[] dest_node, ChannelDetails[] our_chans) {
try (LockedNetworkGraph netgraph = this.router.read_locked_graph()) {
NetworkGraph graph = netgraph.graph();
- long res = bindings.get_route(this.node_id, graph._test_only_get_ptr(), dest_node, new long[] {our_chans[0]._test_only_get_ptr()},
- new long[0], 1000, 42, this.logger);
+ long res = bindings.get_route(this.node_id, graph._test_only_get_ptr(), dest_node, new long[]{our_chans[0]._test_only_get_ptr()},
+ new long[0], 1000, 42, this.logger._test_only_get_ptr());
assert bindings.LDKCResult_RouteLightningErrorZ_result_ok(res);
- Route copy = Route.constructor_read(bindings.Route_write(bindings.LDKCResult_RouteLightningErrorZ_get_ok(res)));
+ // Round-trip the Route through serialization so the returned object owns its own memory
+ // rather than referencing the (freed-below) raw result.
+ byte[] serialized_route = bindings.Route_write(bindings.LDKCResult_RouteLightningErrorZ_get_ok(res));
+ must_free_objs.add(new WeakReference<>(serialized_route));
+ Result_RouteDecodeErrorZ copy = Route.constructor_read(serialized_route);
+ assert copy instanceof Result_RouteDecodeErrorZ.Result_RouteDecodeErrorZ_OK;
bindings.CResult_RouteLightningErrorZ_free(res);
- return copy;
+ return ((Result_RouteDecodeErrorZ.Result_RouteDecodeErrorZ_OK) copy).res;
}
}
}
- class LongHolder { long val; }
-
- java.util.LinkedList<WeakReference<Object>> must_free_objs = new java.util.LinkedList();
- void do_read_event(ConcurrentLinkedQueue<Thread> list, PeerManager pm, long descriptor, byte[] data) {
- Thread thread = new Thread(() -> {
- long res = bindings.PeerManager_read_event(pm._test_only_get_ptr(), descriptor, data);
- assert bindings.LDKCResult_boolPeerHandleErrorZ_result_ok(res);
- //assert bindings.deref_bool(bindings.LDKCResult_boolPeerHandleErrorZ_get_inner(res));
- bindings.CResult_boolPeerHandleErrorZ_free(res);
- });
- thread.start();
- list.add(thread);
- must_free_objs.add(new WeakReference<>(data));
+ // NOTE(review): the must_free_objs declaration is removed above but the field is still
+ // referenced (e.g. in manual_keysif and get_route) - confirm it is re-declared elsewhere
+ // in this file, outside the range of this hunk.
+ static class DescriptorHolder { SocketDescriptor val; }
+
+ // Single-threaded work queue used to serialize read_event calls when not using the NIO
+ // handler. `running`/`ran` are only touched while holding the runqueue lock.
+ boolean running = false;
+ final LinkedList<Runnable> runqueue = new LinkedList<>();
+ boolean ran = false;
+ Thread t = new Thread(() -> {
+ while (true) {
+ try {
+ Runnable r;
+ synchronized (runqueue) {
+ while (runqueue.isEmpty()) {
+ runqueue.wait();
+ }
+ running = true;
+ r = runqueue.pollFirst();
+ }
+ // NOTE(review): if r.run() throws an unchecked exception this thread dies with
+ // running still true, deadlocking wait_events_processed - acceptable for a test,
+ // but worth knowing when debugging hangs.
+ r.run();
+ synchronized (runqueue) {
+ running = false;
+ runqueue.notifyAll();
+ }
+ } catch (InterruptedException e) {
+ return;
+ }
+ }
+ });
+ // Pumps both peers' event processing until the runqueue fully drains (or, in NIO mode, kicks
+ // the handlers and waits a fixed 500ms since socket delivery cannot be observed directly).
+ void wait_events_processed(Peer peer1, Peer peer2) {
+ if (use_nio_peer_handler) {
+ peer1.nio_peer_handler.check_events();
+ peer2.nio_peer_handler.check_events();
+ try { Thread.sleep(500); } catch (InterruptedException e) { assert false; }
+ } else {
+ synchronized (runqueue) {
+ ran = false;
+ }
+ while (true) {
+ peer1.peer_manager.process_events();
+ peer2.peer_manager.process_events();
+ synchronized (runqueue) {
+ if (runqueue.isEmpty() && !running) {
+ // If anything executed since the last check, loop once more in case it
+ // queued further messages; otherwise we are quiescent.
+ if (ran) {
+ ran = false;
+ continue;
+ } else { break; }
+ }
+ try { runqueue.wait(); } catch (InterruptedException e) { assert false; }
+ }
+ }
+ }
+ }
-
- boolean gc_ran = false;
- class GcCheck {
- @Override
- protected void finalize() throws Throwable {
- gc_ran = true;
- super.finalize();
+ // Queues a PeerManager.read_event call on the worker thread (starting it lazily) so message
+ // handling happens off the caller's stack, mirroring real async socket delivery.
+ // NOTE(review): GcCheck is removed here but still instantiated in do_test_message_handler_b -
+ // confirm it is re-added elsewhere in this file, outside the range of this hunk.
+ void do_read_event(PeerManager pm, SocketDescriptor descriptor, byte[] data) {
+ if (!t.isAlive()) t.start();
+ synchronized (runqueue) {
+ ran = true;
+ runqueue.add(() -> {
+ Result_boolPeerHandleErrorZ res = pm.read_event(descriptor, data);
+ assert res instanceof Result_boolPeerHandleErrorZ.Result_boolPeerHandleErrorZ_OK;
+ });
+ runqueue.notifyAll();
}
+ must_free_objs.add(new WeakReference<>(data));
}
- void do_test_message_handler() throws InterruptedException {
- GcCheck obj = new GcCheck();
- Peer peer1 = new Peer((byte) 1);
- Peer peer2 = new Peer((byte) 2);
- ConcurrentLinkedQueue<Thread> list = new ConcurrentLinkedQueue<Thread>();
- LongHolder descriptor1 = new LongHolder();
- LongHolder descriptor1ref = descriptor1;
- bindings.LDKSocketDescriptor sock1 = new bindings.LDKSocketDescriptor() {
- @Override
- public long send_data(byte[] data, boolean resume_read) {
- do_read_event(list, peer1.peer_manager, descriptor1ref.val, data);
- return data.length;
- }
+ // Connects the two peers: over real TCP in NIO mode, otherwise via a pair of in-process
+ // SocketDescriptors whose send_data feeds straight into the other side's read_event queue.
+ void connect_peers(final Peer peer1, final Peer peer2) {
+ if (use_nio_peer_handler) {
+ try {
+ peer1.nio_peer_handler.connect(peer2.chan_manager.get_our_node_id(), new InetSocketAddress("127.0.0.1", peer2.nio_port));
+ } catch (IOException e) { assert false; }
+ } else {
+ // descriptor1 is filled in below, after descriptor2 exists, breaking the circular dependency.
+ DescriptorHolder descriptor1 = new DescriptorHolder();
+ DescriptorHolder descriptor1ref = descriptor1;
+ SocketDescriptor descriptor2 = SocketDescriptor.new_impl(new SocketDescriptor.SocketDescriptorInterface() {
+ @Override
+ public long send_data(byte[] data, boolean resume_read) {
+ do_read_event(peer1.peer_manager, descriptor1ref.val, data);
+ return data.length;
+ }
- @Override public void disconnect_socket() { assert false; }
- @Override public boolean eq(long other_arg) { return bindings.LDKSocketDescriptor_get_obj_from_jcalls(other_arg).hash() == 2; }
- @Override public long hash() { return 2; }
- };
- long descriptor2 = bindings.LDKSocketDescriptor_new(sock1);
-
- bindings.LDKSocketDescriptor sock2 = new bindings.LDKSocketDescriptor() {
- @Override
- public long send_data(byte[] data, boolean resume_read) {
- do_read_event(list, peer2.peer_manager, descriptor2, data);
- return data.length;
- }
+ @Override public void disconnect_socket() { assert false; }
+ @Override public boolean eq(SocketDescriptor other_arg) { return other_arg.hash() == 2; }
+ @Override public long hash() { return 2; }
+ });
- @Override public void disconnect_socket() { assert false; }
- @Override public boolean eq(long other_arg) { return bindings.LDKSocketDescriptor_get_obj_from_jcalls(other_arg).hash() == 1; }
- @Override public long hash() { return 1; }
- };
- descriptor1.val = bindings.LDKSocketDescriptor_new(sock2);
+ descriptor1.val = SocketDescriptor.new_impl(new SocketDescriptor.SocketDescriptorInterface() {
+ @Override
+ public long send_data(byte[] data, boolean resume_read) {
+ do_read_event(peer2.peer_manager, descriptor2, data);
+ return data.length;
+ }
- long init_vec = bindings.PeerManager_new_outbound_connection(peer1.peer_manager._test_only_get_ptr(), peer2.node_id, descriptor1.val);
- assert (bindings.LDKCResult_CVec_u8ZPeerHandleErrorZ_result_ok(init_vec));
+ @Override public void disconnect_socket() { assert false; }
+ @Override public boolean eq(SocketDescriptor other_arg) { return other_arg.hash() == 1; }
+ @Override public long hash() { return 1; }
+ });
- long con_res = bindings.PeerManager_new_inbound_connection(peer2.peer_manager._test_only_get_ptr(), descriptor2);
- assert (bindings.LDKCResult_NonePeerHandleErrorZ_result_ok(con_res));
- bindings.CResult_NonePeerHandleErrorZ_free(con_res);
- do_read_event(list, peer2.peer_manager, descriptor2, bindings.LDKCResult_CVec_u8ZPeerHandleErrorZ_get_ok(init_vec));
- bindings.CResult_CVec_u8ZPeerHandleErrorZ_free(init_vec);
+ Result_CVec_u8ZPeerHandleErrorZ conn_res = peer1.peer_manager.new_outbound_connection(peer2.node_id, descriptor1.val);
+ assert conn_res instanceof Result_CVec_u8ZPeerHandleErrorZ.Result_CVec_u8ZPeerHandleErrorZ_OK;
- while (!list.isEmpty()) { list.poll().join(); }
+ Result_NonePeerHandleErrorZ inbound_conn_res = peer2.peer_manager.new_inbound_connection(descriptor2);
+ assert inbound_conn_res instanceof Result_NonePeerHandleErrorZ.Result_NonePeerHandleErrorZ_OK;
+ // Deliver peer1's initial handshake bytes to peer2 to kick off the noise handshake.
+ do_read_event(peer2.peer_manager, descriptor2, ((Result_CVec_u8ZPeerHandleErrorZ.Result_CVec_u8ZPeerHandleErrorZ_OK) conn_res).res);
+ }
+ }
- long cc_res = bindings.ChannelManager_create_channel(peer1.chan_manager._test_only_get_ptr(), peer2.node_id, 10000, 1000, 42, 0);
- assert bindings.LDKCResult_NoneAPIErrorZ_result_ok(cc_res);
- bindings.CResult_NoneAPIErrorZ_free(cc_res);
+ // First half of the end-to-end test: connects two fresh peers, opens and confirms a channel,
+ // routes a payment, and (optionally) reloads both peers from serialized state. Returns the
+ // TestState consumed by do_test_message_handler_b.
+ // NOTE(review): payment_hash / payment_preimage are referenced but not declared in this hunk -
+ // presumably fields defined elsewhere in the file; verify.
+ TestState do_test_message_handler() throws InterruptedException {
+ Peer peer1 = new Peer((byte) 1);
+ Peer peer2 = new Peer((byte) 2);
- peer1.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
- peer2.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
+ connect_peers(peer1, peer2);
+ wait_events_processed(peer1, peer2);
+
+ Result_NoneAPIErrorZ cc_res = peer1.chan_manager.create_channel(peer2.node_id, 10000, 1000, 42, null);
+ assert cc_res instanceof Result_NoneAPIErrorZ.Result_NoneAPIErrorZ_OK;
+ wait_events_processed(peer1, peer2);
Event[] events = peer1.chan_manager_events.get_and_clear_pending_events();
assert events.length == 1;
assert events[0] instanceof Event.FundingGenerationReady;
- assert ((Event.FundingGenerationReady)events[0]).channel_value_satoshis == 10000;
- assert ((Event.FundingGenerationReady)events[0]).user_channel_id == 42;
- byte[] funding_spk = ((Event.FundingGenerationReady)events[0]).output_script;
+ assert ((Event.FundingGenerationReady) events[0]).channel_value_satoshis == 10000;
+ assert ((Event.FundingGenerationReady) events[0]).user_channel_id == 42;
+ byte[] funding_spk = ((Event.FundingGenerationReady) events[0]).output_script;
assert funding_spk.length == 34 && funding_spk[0] == 0 && funding_spk[1] == 32; // P2WSH
- byte[] chan_id = ((Event.FundingGenerationReady)events[0]).temporary_channel_id;
+ byte[] chan_id = ((Event.FundingGenerationReady) events[0]).temporary_channel_id;
+
+ NetworkParameters bitcoinj_net = NetworkParameters.fromID(NetworkParameters.ID_MAINNET);
- Transaction funding = new Transaction(NetworkParameters.fromID(NetworkParameters.ID_MAINNET));
- funding.addInput(new TransactionInput(NetworkParameters.fromID(NetworkParameters.ID_MAINNET), funding, new byte[0]));
+ // Build a minimal (single-input, witness-bearing) funding transaction paying the channel script.
+ Transaction funding = new Transaction(bitcoinj_net);
+ funding.addInput(new TransactionInput(bitcoinj_net, funding, new byte[0]));
funding.getInputs().get(0).setWitness(new TransactionWitness(2)); // Make sure we don't complain about lack of witness
funding.getInput(0).getWitness().setPush(0, new byte[]{0x1});
funding.addOutput(Coin.SATOSHI.multiply(10000), new Script(funding_spk));
peer1.chan_manager.funding_transaction_generated(chan_id, OutPoint.constructor_new(funding.getTxId().getReversedBytes(), (short) 0));
-
- peer1.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
- peer2.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
+ wait_events_processed(peer1, peer2);
events = peer1.chan_manager_events.get_and_clear_pending_events();
assert events.length == 1;
assert events[0] instanceof Event.FundingBroadcastSafe;
- assert ((Event.FundingBroadcastSafe)events[0]).user_channel_id == 42;
+ assert ((Event.FundingBroadcastSafe) events[0]).user_channel_id == 42;
- Block b = new Block(NetworkParameters.fromID(NetworkParameters.ID_MAINNET), 2, Sha256Hash.ZERO_HASH, Sha256Hash.ZERO_HASH, 42, 0, 0, Arrays.asList(new Transaction[]{funding}));
- peer1.connect_block(b, funding, 1);
- peer2.connect_block(b, funding, 1);
+ // Confirm the funding tx at height 1, then bury it under 8 more empty blocks.
+ Block b = new Block(bitcoinj_net, 2, Sha256Hash.ZERO_HASH, Sha256Hash.ZERO_HASH, 42, 0, 0, Arrays.asList(new Transaction[]{funding}));
+ peer1.connect_block(b, 1, 0);
+ peer2.connect_block(b, 1, 0);
for (int height = 2; height < 10; height++) {
- b = new Block(NetworkParameters.fromID(NetworkParameters.ID_MAINNET), 2, b.getHash(), Sha256Hash.ZERO_HASH, 42, 0, 0, Arrays.asList(new Transaction[]{funding}));
- peer1.connect_block(b, null, height);
- peer2.connect_block(b, null, height);
+ b = new Block(bitcoinj_net, 2, b.getHash(), Sha256Hash.ZERO_HASH, 42, 0, 0, Arrays.asList(new Transaction[0]));
+ peer1.connect_block(b, height, 0);
+ peer2.connect_block(b, height, 0);
}
-
- peer1.peer_manager.process_events();
- peer2.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
+ wait_events_processed(peer1, peer2);
peer1.chan_manager.list_channels();
ChannelDetails[] peer1_chans = peer1.chan_manager.list_channels();
Route route = peer1.get_route(peer2.node_id, peer1_chans);
Result_NonePaymentSendFailureZ payment_res = peer1.chan_manager.send_payment(route, payment_hash, new byte[32]);
assert payment_res instanceof Result_NonePaymentSendFailureZ.Result_NonePaymentSendFailureZ_OK;
+ wait_events_processed(peer1, peer2);
- peer1.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
- peer2.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
- peer1.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
-
- long[] peer2_events = bindings.EventsProvider_get_and_clear_pending_events(peer2.chan_manager_events._test_only_get_ptr());
- assert peer2_events.length == 1;
- bindings.LDKEvent forwardable = bindings.LDKEvent_ref_from_ptr(peer2_events[0]);
- assert forwardable instanceof bindings.LDKEvent.PendingHTLCsForwardable;
- bindings.CVec_EventZ_free(peer2_events);
- bindings.ChannelManager_process_pending_htlc_forwards(peer2.chan_manager._test_only_get_ptr());
-
- peer2_events = bindings.EventsProvider_get_and_clear_pending_events(peer2.chan_manager_events._test_only_get_ptr());
- assert peer2_events.length == 1;
- bindings.LDKEvent payment_recvd = bindings.LDKEvent_ref_from_ptr(peer2_events[0]);
- assert payment_recvd instanceof bindings.LDKEvent.PaymentReceived;
- peer2.chan_manager.claim_funds(payment_preimage, new byte[32], ((bindings.LDKEvent.PaymentReceived) payment_recvd).amt);
- bindings.CVec_EventZ_free(peer2_events);
-
- peer2.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
- peer1.peer_manager.process_events();
- while (!list.isEmpty()) { list.poll().join(); }
-
- long[] peer1_events = bindings.EventsProvider_get_and_clear_pending_events(peer1.chan_manager_events._test_only_get_ptr());
- assert peer1_events.length == 1;
- bindings.LDKEvent sent = bindings.LDKEvent_ref_from_ptr(peer1_events[0]);
- assert sent instanceof bindings.LDKEvent.PaymentSent;
- assert Arrays.equals(((bindings.LDKEvent.PaymentSent) sent).payment_preimage, payment_preimage);
- bindings.CVec_EventZ_free(peer1_events);
-
- peer1.free();
- peer2.free();
- bindings.SocketDescriptor_free(descriptor2);
- bindings.SocketDescriptor_free(descriptor1.val);
+ if (reload_peers) {
+ if (use_nio_peer_handler) {
+ peer1.nio_peer_handler.interrupt();
+ peer2.nio_peer_handler.interrupt();
+ }
+ // Keep only a weak ref to the original peer1 so the caller can assert it gets GCed.
+ WeakReference<Peer> op1 = new WeakReference<Peer>(peer1);
+ peer1 = new Peer(peer1);
+ peer2 = new Peer(peer2);
+ return new TestState(op1, peer1, peer2, payment_preimage, b.getHash());
+ }
+ return new TestState(null, peer1, peer2, payment_preimage, b.getHash());
}
- @Test
- public void test_message_handler() throws InterruptedException {
- do_test_message_handler();
- while (!gc_ran) {
+ boolean cross_reload_ref_pollution = false;
+ class TestState {
+ private final WeakReference<Peer> ref_block;
+ private final Peer peer1;
+ private final Peer peer2;
+ private final byte[] payment_preimage;
+ public Sha256Hash best_blockhash;
+
+ public TestState(WeakReference<Peer> ref_block, Peer peer1, Peer peer2, byte[] payment_preimage, Sha256Hash best_blockhash) {
+ this.ref_block = ref_block;
+ this.peer1 = peer1;
+ this.peer2 = peer2;
+ this.payment_preimage = payment_preimage;
+ this.best_blockhash = best_blockhash;
+ }
+ }
+ void do_test_message_handler_b(TestState state) {
+ GcCheck obj = new GcCheck();
+ if (state.ref_block != null) {
+ // Ensure the original peers get freed before we move on. Note that we have to be in a different function
+ // scope to do so as the (at least current OpenJDK) JRE won't release anything created in the same scope.
+ while (!cross_reload_ref_pollution && state.ref_block.get() != null) {
+ System.gc();
+ System.runFinalization();
+ }
+ connect_peers(state.peer1, state.peer2);
+ wait_events_processed(state.peer1, state.peer2);
+ }
+
+ Event[] events = state.peer2.chan_manager_events.get_and_clear_pending_events();
+ assert events.length == 1;
+ assert events[0] instanceof Event.PendingHTLCsForwardable;
+ state.peer2.chan_manager.process_pending_htlc_forwards();
+
+ events = state.peer2.chan_manager_events.get_and_clear_pending_events();
+ assert events.length == 1;
+ assert events[0] instanceof Event.PaymentReceived;
+ state.peer2.chan_manager.claim_funds(state.payment_preimage, new byte[32], ((Event.PaymentReceived) events[0]).amt);
+ wait_events_processed(state.peer1, state.peer2);
+
+ events = state.peer1.chan_manager_events.get_and_clear_pending_events();
+ assert events.length == 1;
+ assert events[0] instanceof Event.PaymentSent;
+ assert Arrays.equals(((Event.PaymentSent) events[0]).payment_preimage, state.payment_preimage);
+ wait_events_processed(state.peer1, state.peer2);
+
+ ChannelDetails[] peer1_chans = state.peer1.chan_manager.list_channels();
+
+ if (nice_close) {
+ Result_NoneAPIErrorZ close_res = state.peer1.chan_manager.close_channel(peer1_chans[0].get_channel_id());
+ assert close_res instanceof Result_NoneAPIErrorZ.Result_NoneAPIErrorZ_OK;
+ wait_events_processed(state.peer1, state.peer2);
+
+ assert state.peer1.broadcast_set.size() == 1;
+ assert state.peer2.broadcast_set.size() == 1;
+ } else {
+ state.peer1.chan_manager.force_close_all_channels();
+ wait_events_processed(state.peer1, state.peer2);
+
+ assert state.peer1.broadcast_set.size() == 1;
+ assert state.peer2.broadcast_set.size() == 0;
+
+ NetworkParameters bitcoinj_net = NetworkParameters.fromID(NetworkParameters.ID_MAINNET);
+ Transaction tx = new Transaction(bitcoinj_net, state.peer1.broadcast_set.getFirst());
+ Block b = new Block(bitcoinj_net, 2, state.best_blockhash, Sha256Hash.ZERO_HASH, 42, 0, 0,
+ Arrays.asList(new Transaction[]{tx}));
+ TwoTuple<byte[], TwoTuple<Integer, TxOut>[]>[] watch_outputs = state.peer2.connect_block(b, 1, 1);
+ if (watch_outputs != null) { // We only process watch_outputs manually when we use a manually-built Watch impl
+ assert watch_outputs.length == 1;
+ assert Arrays.equals(watch_outputs[0].a, tx.getTxId().getReversedBytes());
+ assert watch_outputs[0].b.length == 1;
+ }
+
+ // This used to be buggy and would double-free, so go ahead and fetch them!
+ for (ChannelMonitor mon : state.peer2.monitors.values()) {
+ byte[][] txn = mon.get_latest_holder_commitment_txn(state.peer2.logger);
+ }
+ }
+
+ if (use_nio_peer_handler) {
+ state.peer1.nio_peer_handler.interrupt();
+ state.peer2.nio_peer_handler.interrupt();
+ }
+ }
+
+ java.util.LinkedList<WeakReference<Object>> must_free_objs = new java.util.LinkedList();
+ boolean gc_ran = false;
+ class GcCheck {
+ @Override
+ protected void finalize() throws Throwable {
+ gc_ran = true;
+ super.finalize();
+ }
+ }
+}
+public class HumanObjectPeerTest {
+ HumanObjectPeerTestInstance do_test_run(boolean nice_close, boolean use_km_wrapper, boolean use_manual_watch, boolean reload_peers, boolean break_cross_peer_refs, boolean nio_peer_handler) throws InterruptedException {
+ HumanObjectPeerTestInstance instance = new HumanObjectPeerTestInstance(nice_close, use_km_wrapper, use_manual_watch, reload_peers, break_cross_peer_refs, nio_peer_handler);
+ HumanObjectPeerTestInstance.TestState state = instance.do_test_message_handler();
+ instance.do_test_message_handler_b(state);
+ return instance;
+ }
+ void do_test(boolean nice_close, boolean use_km_wrapper, boolean use_manual_watch, boolean reload_peers, boolean break_cross_peer_refs, boolean nio_peer_handler) throws InterruptedException {
+ HumanObjectPeerTestInstance instance = do_test_run(nice_close, use_km_wrapper, use_manual_watch, reload_peers, break_cross_peer_refs, nio_peer_handler);
+ while (!instance.gc_ran) {
System.gc();
System.runFinalization();
}
- for (WeakReference<Object> o : must_free_objs)
+ for (WeakReference<Object> o : instance.must_free_objs)
assert o.get() == null;
}
-}
\ No newline at end of file
+ @Test
+ public void test_message_handler() throws InterruptedException {
+ for (int i = 0; i < (1 << 6) - 1; i++) {
+ boolean nice_close = (i & (1 << 0)) != 0;
+ boolean use_km_wrapper = (i & (1 << 1)) != 0;
+ boolean use_manual_watch = (i & (1 << 2)) != 0;
+ boolean reload_peers = (i & (1 << 3)) != 0;
+ boolean break_cross_refs = (i & (1 << 4)) != 0;
+ boolean nio_peer_handler = (i & (1 << 5)) != 0;
+ if (break_cross_refs && !reload_peers) {
+ // There are no cross refs to break without reloading peers.
+ continue;
+ }
+ System.err.println("Running test with flags " + i);
+ do_test(nice_close, use_km_wrapper, use_manual_watch, reload_peers, break_cross_refs, nio_peer_handler);
+ }
+ }
+}