diff --git a/src/test/java/org/ldk/HumanObjectPeerTest.java b/src/test/java/org/ldk/HumanObjectPeerTest.java
index 04eb14c7..79f1fd1e 100644
--- a/src/test/java/org/ldk/HumanObjectPeerTest.java
+++ b/src/test/java/org/ldk/HumanObjectPeerTest.java
@@ -1,342 +1,877 @@
 package org.ldk;
-import com.google.j2objc.annotations.Weak;
 import org.bitcoinj.core.*;
 import org.bitcoinj.script.Script;
 import org.junit.jupiter.api.Test;
+import org.ldk.batteries.ChannelManagerConstructor;
+import org.ldk.batteries.NioPeerHandler;
+import org.ldk.enums.LDKCurrency;
 import org.ldk.enums.LDKNetwork;
 import org.ldk.impl.bindings;
 import org.ldk.structs.*;
+import org.ldk.util.TwoTuple;
+import java.io.IOException;
 import java.lang.ref.WeakReference;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
+import java.net.InetSocketAddress;
+import java.util.*;
+
+class HumanObjectPeerTestInstance {
+	private final boolean nice_close;
+	private final boolean use_km_wrapper;
+	private final boolean use_manual_watch;
+	private final boolean reload_peers;
+	private final boolean break_cross_peer_refs;
+	private final boolean use_nio_peer_handler;
+	private final boolean use_filter;
+	private final boolean use_chan_manager_constructor;
+
+	HumanObjectPeerTestInstance(boolean nice_close, boolean use_km_wrapper, boolean use_manual_watch, boolean reload_peers, boolean break_cross_peer_refs, boolean use_nio_peer_handler, boolean use_filter, boolean use_chan_manager_constructor) {
+		this.nice_close = nice_close;
+		this.use_km_wrapper = use_km_wrapper;
+		this.use_manual_watch = use_manual_watch;
+		this.reload_peers = reload_peers;
+		this.break_cross_peer_refs = break_cross_peer_refs;
+		this.use_nio_peer_handler = use_nio_peer_handler;
+		this.use_filter = use_filter;
+		this.use_chan_manager_constructor = use_chan_manager_constructor;
+	}
 
-public class HumanObjectPeerTest {
 	class Peer {
-		final long logger;
-		final long fee_estimator;
-		final long tx_broadcaster;
-		final KeysManager keys;
-		final KeysInterface keys_interface;
-		final ChannelManager chan_manager;
-		final EventsProvider chan_manager_events;
-		final long chan_handler;
-		final long router;
-		final long route_handler;
-		final long message_handler;
-		final long peer_manager;
-		HashMap<String, Long> monitors; // Wow I forgot just how terrible Java is - we can't put a byte array here.
-		byte[] node_id;
+		KeysInterface manual_keysif(KeysInterface underlying_if) {
+			return KeysInterface.new_impl(new KeysInterface.KeysInterfaceInterface() {
+				@Override public byte[] get_node_secret() { return underlying_if.get_node_secret(); }
+				@Override public byte[] get_destination_script() { return underlying_if.get_destination_script(); }
+				@Override public byte[] get_shutdown_pubkey() { return underlying_if.get_shutdown_pubkey(); }
+
+				@Override
+				public Sign get_channel_signer(boolean inbound, long channel_value_satoshis) {
+					Sign underlying_ck = underlying_if.get_channel_signer(inbound, channel_value_satoshis);
+					// TODO: Expose the underlying signer from a Sign
+					/*BaseSign.BaseSignInterface si = new BaseSign.BaseSignInterface() {
+						@Override
+						public byte[] get_per_commitment_point(long idx) {
+							return underlying_ck.get_per_commitment_point(idx);
+						}
+
+						@Override
+						public byte[] release_commitment_secret(long idx) {
+							return underlying_ck.release_commitment_secret(idx);
+						}
+
+						@Override
+						public byte[] channel_keys_id() {
+							return new byte[32];
+						}
+
+						@Override
+						public Result_C2Tuple_SignatureCVec_SignatureZZNoneZ sign_counterparty_commitment(CommitmentTransaction commitment_tx) {
+							return underlying_ck.sign_counterparty_commitment(commitment_tx);
+						}
+
+						@Override
+						public Result_C2Tuple_SignatureCVec_SignatureZZNoneZ sign_holder_commitment_and_htlcs(HolderCommitmentTransaction holder_commitment_tx) {
+							return underlying_ck.sign_holder_commitment_and_htlcs(holder_commitment_tx);
+						}
+
+						@Override
+						public Result_SignatureNoneZ sign_justice_transaction(byte[] justice_tx, long input, long amount, byte[] per_commitment_key, HTLCOutputInCommitment htlc) {
+							return underlying_ck.sign_justice_transaction(justice_tx, input, amount, per_commitment_key, htlc);
+						}
+
+						@Override
+						public Result_SignatureNoneZ sign_counterparty_htlc_transaction(byte[] htlc_tx, long input, long amount, byte[] per_commitment_point, HTLCOutputInCommitment htlc) {
+							return underlying_ck.sign_counterparty_htlc_transaction(htlc_tx, input, amount, per_commitment_point, htlc);
+						}
+
+						@Override
+						public Result_SignatureNoneZ sign_closing_transaction(byte[] closing_tx) {
+							return underlying_ck.sign_closing_transaction(closing_tx);
+						}
+
+						@Override
+						public Result_SignatureNoneZ sign_channel_announcement(UnsignedChannelAnnouncement msg) {
+							return underlying_ck.sign_channel_announcement(msg);
+						}
+
+						@Override
+						public void ready_channel(ChannelTransactionParameters params) {
+							underlying_ck.ready_channel(params);
+						}
+
+						@Override
+						public byte[] write() {
+							return underlying_ck.write();
+						}
+					};*/
+					//Sign resp = Sign.new_impl(si, underlying_ck.get_pubkeys());
+					//must_free_objs.add(new WeakReference<>(si));
+					//must_free_objs.add(new WeakReference<>(resp));
+					must_free_objs.add(new WeakReference<>(underlying_ck));
+					//return resp;
+					return underlying_ck;
+				}
+
+				@Override
+				public byte[] get_secure_random_bytes() {
+					return underlying_if.get_secure_random_bytes();
+				}
-		Peer(byte seed) {
-			bindings.LDKLogger log_trait = (String arg) -> System.out.println(seed + ": " + arg);
-			logger = bindings.LDKLogger_new(log_trait);
-			this.fee_estimator = bindings.LDKFeeEstimator_new(confirmation_target -> 0);
-			this.tx_broadcaster = bindings.LDKBroadcasterInterface_new(tx -> {
-				// We should broadcast
-			});
-			this.monitors = new HashMap<>();
-			Watch chain_monitor = new Watch(new bindings.LDKWatch() {
 				@Override
-				public long watch_channel(long funding_txo, long monitor) {
+				public Result_SignDecodeErrorZ read_chan_signer(byte[] reader) {
+					return underlying_if.read_chan_signer(reader);
+				}
+
+				@Override
+				public Result_RecoverableSignatureNoneZ sign_invoice(byte[] invoice_preimage) {
+					return underlying_if.sign_invoice(invoice_preimage);
+				}
+			});
+		}
+
+		Watch get_manual_watch() {
+			Watch.WatchInterface watch_impl = new Watch.WatchInterface() {
+				public Result_NoneChannelMonitorUpdateErrZ watch_channel(OutPoint funding_txo, ChannelMonitor monitor) {
 					synchronized (monitors) {
-						assert monitors.put(Arrays.toString(bindings.OutPoint_get_txid(funding_txo)), monitor) == null;
+						assert monitors.put(Arrays.toString(funding_txo.get_txid()), monitor) == null;
 					}
-					bindings.OutPoint_free(funding_txo);
-					return bindings.CResult_NoneChannelMonitorUpdateErrZ_ok();
+					return Result_NoneChannelMonitorUpdateErrZ.constructor_ok();
 				}
-				@Override
-				public long update_channel(long funding_txo, long update) {
+				public Result_NoneChannelMonitorUpdateErrZ update_channel(OutPoint funding_txo, ChannelMonitorUpdate update) {
 					synchronized (monitors) {
-						String txid = Arrays.toString(bindings.OutPoint_get_txid(funding_txo));
+						String txid = Arrays.toString(funding_txo.get_txid());
 						assert monitors.containsKey(txid);
-						long update_res = bindings.ChannelMonitor_update_monitor(monitors.get(txid), update, tx_broadcaster, logger);
-						assert bindings.LDKCResult_NoneMonitorUpdateErrorZ_result_ok(update_res);
-						bindings.CResult_NoneMonitorUpdateErrorZ_free(update_res);
+						Result_NoneMonitorUpdateErrorZ update_res = monitors.get(txid).update_monitor(update, tx_broadcaster, fee_estimator, logger);
+						assert update_res instanceof Result_NoneMonitorUpdateErrorZ.Result_NoneMonitorUpdateErrorZ_OK;
 					}
-					bindings.OutPoint_free(funding_txo);
-					bindings.ChannelMonitorUpdate_free(update);
-					return bindings.CResult_NoneChannelMonitorUpdateErrZ_ok();
+					return Result_NoneChannelMonitorUpdateErrZ.constructor_ok();
 				}
 				@Override
-				public long release_pending_monitor_events() {
+				public MonitorEvent[] release_pending_monitor_events() {
 					synchronized (monitors) {
 						assert monitors.size() <= 1;
-						for (Long mon : monitors.values()) {
-							return bindings.ChannelMonitor_get_and_clear_pending_monitor_events(mon);
+						for (ChannelMonitor mon : monitors.values()) {
+							return mon.get_and_clear_pending_monitor_events();
 						}
 					}
-					return bindings.new_empty_slice_vec();
+					return new MonitorEvent[0];
+				}
+			};
+			Watch watch = Watch.new_impl(watch_impl);
+			must_free_objs.add(new WeakReference<>(watch_impl));
+			must_free_objs.add(new WeakReference<>(watch));
+			return watch;
+		}
+
+		NioPeerHandler nio_peer_handler;
+		short nio_port;
+		final byte seed;
+		final Logger logger;
+		final FeeEstimator fee_estimator;
+		final BroadcasterInterface tx_broadcaster;
+		final KeysManager explicit_keys_manager;
+		final KeysInterface keys_interface;
+		final ChainMonitor chain_monitor;
+		final NetGraphMsgHandler router;
+		final Watch chain_watch;
+		final HashSet<String> filter_additions;
+		final Filter filter;
+		ChannelManager chan_manager;
+		PeerManager peer_manager;
+		final HashMap<String, ChannelMonitor> monitors; // Wow I forgot just how terrible Java is - we can't put a byte array here.
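+		// Our own node pubkey, cached once the ChannelManager has been constructed below.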
+		byte[] node_id;
+		final LinkedList<byte[]> broadcast_set = new LinkedList<>();
+		final LinkedList<Event> pending_manager_events = new LinkedList<>();
+		ChannelManagerConstructor constructor = null;
+		GcCheck obj = new GcCheck();
+
+		private TwoTuple<OutPoint, byte[]> test_mon_roundtrip(ChannelMonitor mon) {
+			// Because get_funding_txo() returns an OutPoint in a tuple that is a reference to an OutPoint inside the
+			// ChannelMonitor, it's a good test to ensure that the OutPoint isn't freed (or is cloned) before the
+			// ChannelMonitor is. This used to be broken.
+			Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ roundtrip_monitor = UtilMethods.constructor_BlockHashChannelMonitorZ_read(mon.write(), keys_interface);
+			assert roundtrip_monitor instanceof Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK;
+			TwoTuple<OutPoint, byte[]> funding_txo = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) roundtrip_monitor).res.b.get_funding_txo();
+			System.gc(); System.runFinalization(); // Give the GC a chance to run.
+			return funding_txo;
+		}
+
+		private Peer(Object _dummy, byte seed) {
+			logger = Logger.new_impl((String arg) -> System.out.println(seed + ": " + arg));
+			fee_estimator = FeeEstimator.new_impl((confirmation_target -> 253));
+			tx_broadcaster = BroadcasterInterface.new_impl(tx -> {
+				broadcast_set.add(tx);
+			});
+			monitors = new HashMap<>();
+			this.seed = seed;
+			Persist persister = Persist.new_impl(new Persist.PersistInterface() {
+				@Override
+				public Result_NoneChannelMonitorUpdateErrZ persist_new_channel(OutPoint id, ChannelMonitor data) {
+					synchronized (monitors) {
+						String key = Arrays.toString(id.to_channel_id());
+						assert monitors.put(key, data) == null;
+						TwoTuple<OutPoint, byte[]> res = test_mon_roundtrip(data);
+						assert Arrays.equals(res.a.get_txid(), id.get_txid());
+						assert res.a.get_index() == id.get_index();
+					}
+					return Result_NoneChannelMonitorUpdateErrZ.constructor_ok();
+				}
+
+				@Override
+				public Result_NoneChannelMonitorUpdateErrZ update_persisted_channel(OutPoint id, ChannelMonitorUpdate update, ChannelMonitor data) {
+					synchronized (monitors) {
+						String key = Arrays.toString(id.to_channel_id());
+						assert monitors.put(key, data) != null;
+						TwoTuple<OutPoint, byte[]> res = test_mon_roundtrip(data);
+						assert Arrays.equals(res.a.get_txid(), id.get_txid());
+						assert res.a.get_index() == id.get_index();
+					}
+					return Result_NoneChannelMonitorUpdateErrZ.constructor_ok();
+				}
+			});
+			filter_additions = new HashSet<>();
+			if (use_filter) {
+				this.filter = Filter.new_impl(new Filter.FilterInterface() {
+					@Override public void register_tx(byte[] txid, byte[] script_pubkey) {
+						filter_additions.add(Arrays.toString(txid));
+					}
+					@Override public Option_C2Tuple_usizeTransactionZZ register_output(WatchedOutput output) {
+						filter_additions.add(Arrays.toString(output.get_outpoint().get_txid()) + ":" + output.get_outpoint().get_index());
+						return Option_C2Tuple_usizeTransactionZZ.constructor_none();
+					}
+				});
+			} else {
+				this.filter = null;
+			}
+
+			if (use_manual_watch) {
+				chain_watch = get_manual_watch();
+				chain_monitor = null;
+			} else {
+				chain_monitor = ChainMonitor.constructor_new(filter, tx_broadcaster, logger, fee_estimator, persister);
+				chain_watch = chain_monitor.as_Watch();
+			}
+
 			byte[] key_seed = new byte[32];
 			for (byte i = 0; i < 32; i++) {
 				key_seed[i] = (byte) (i ^ seed);
 			}
-			this.keys = new KeysManager(key_seed, LDKNetwork.LDKNetwork_Bitcoin, System.currentTimeMillis() / 1000, (int) (System.currentTimeMillis() * 1000) & 0xffffffff);
-			this.keys_interface = keys.as_KeysInterface();
-			this.chan_manager = new ChannelManager(LDKNetwork.LDKNetwork_Bitcoin, new FeeEstimator(confirmation_target -> 0), chain_monitor,
-					new BroadcasterInterface(tx -> {
-					}), new Logger(log_trait), keys.as_KeysInterface(), new UserConfig(), 1);
-			this.node_id = chan_manager.get_our_node_id();
-			this.chan_manager_events = chan_manager.as_EventsProvider();
+			KeysManager keys = KeysManager.constructor_new(key_seed, System.currentTimeMillis() / 1000, (int) (System.currentTimeMillis() * 1000));
+			if (use_km_wrapper) {
+				this.keys_interface = manual_keysif(keys.as_KeysInterface());
+				this.explicit_keys_manager = null;
+			} else {
+				this.keys_interface = keys.as_KeysInterface();
+				this.explicit_keys_manager = keys;
+			}
+			this.router = NetGraphMsgHandler.constructor_new(new byte[32], null, logger);
+		}
+		private void bind_nio() {
+			if (!use_nio_peer_handler) return;
+			try { this.nio_peer_handler = new NioPeerHandler(peer_manager); } catch (IOException e) { assert false; }
+			for (short i = 10_000; true; i++) {
+				try {
+					nio_peer_handler.bind_listener(new InetSocketAddress("127.0.0.1", i));
+					nio_port = i;
+					break;
+				} catch (IOException e) { assert i < 10_500; }
+			}
+		}
+		Peer(byte seed) {
+			this(null, seed);
+			if (use_chan_manager_constructor) {
+				try {
+					this.constructor = new ChannelManagerConstructor(LDKNetwork.LDKNetwork_Bitcoin, UserConfig.constructor_default(), new byte[32], 0,
+							this.keys_interface, this.fee_estimator, this.chain_monitor, this.tx_broadcaster, this.logger);
+					constructor.chain_sync_completed(new ChannelManagerConstructor.ChannelManagerPersister() {
+						@Override public void handle_events(Event[] events) {
+							synchronized (pending_manager_events) {
+								pending_manager_events.addAll(Arrays.asList(events));
+								pending_manager_events.notifyAll();
+							}
+						}
+						@Override public void persist_manager(byte[] channel_manager_bytes) { }
+					});
+					this.chan_manager = constructor.channel_manager;
+					must_free_objs.add(new WeakReference<>(this.chan_manager));
+				} catch (ChannelManagerConstructor.InvalidSerializedDataException e) {
+					assert false;
+				}
+			} else {
+				this.chan_manager = ChannelManager.constructor_new(this.fee_estimator, chain_watch, tx_broadcaster, logger, this.keys_interface, UserConfig.constructor_default(), LDKNetwork.LDKNetwork_Bitcoin, BestBlock.constructor_new(new byte[32], 0));
+			}
 
-			this.chan_handler = bindings.ChannelManager_as_ChannelMessageHandler(chan_manager._test_only_get_ptr());
-			this.router = bindings.NetGraphMsgHandler_new(0, logger);
-			this.route_handler = bindings.NetGraphMsgHandler_as_RoutingMessageHandler(router);
-			this.message_handler = bindings.MessageHandler_new(chan_handler, route_handler);
+			this.node_id = chan_manager.get_our_node_id();
 			byte[] random_data = new byte[32];
 			for (byte i = 0; i < 32; i++) {
 				random_data[i] = (byte) ((i ^ seed) ^ 0xf0);
 			}
-			this.peer_manager = bindings.PeerManager_new(message_handler, keys_interface.call_get_node_secret(), random_data, logger);
+			this.peer_manager = PeerManager.constructor_new(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+			bind_nio();
 			System.gc();
 		}
 
-		void connect_block(Block b, Transaction t, int height) {
+		byte[] hexStringToByteArray(String s) {
+			int len = s.length();
+			byte[] data = new byte[len / 2];
+			for (int i = 0; i < len; i += 2) {
+				data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4)
+						+ Character.digit(s.charAt(i+1), 16));
+			}
+			return data;
+		}
+
+
+		Object ptr_to;
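+		// Construct a Peer from the serialized state of an existing one, simulating a restart: the
+		// ChannelManager and ChannelMonitor(s) are round-tripped through their byte encodings (or, when
+		// break_cross_peer_refs is unset, the monitor object is shared directly with the original Peer).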
+		Peer(Peer orig) {
+			this(null, orig.seed);
+			if (use_chan_manager_constructor) {
+				byte[][] monitors = {orig.monitors.values().stream().iterator().next().write()};
+				byte[] serialized = orig.chan_manager.write();
+				try {
+					this.constructor = new ChannelManagerConstructor(serialized, monitors, this.keys_interface,
+							this.fee_estimator, this.chain_monitor, this.filter, this.tx_broadcaster, this.logger);
+					constructor.chain_sync_completed(new ChannelManagerConstructor.ChannelManagerPersister() {
+						@Override public void handle_events(Event[] events) {
+							synchronized (pending_manager_events) {
+								pending_manager_events.addAll(Arrays.asList(events));
+								pending_manager_events.notifyAll();
+							}
+						}
+						@Override public void persist_manager(byte[] channel_manager_bytes) { }
+					});
+					this.chan_manager = constructor.channel_manager;
+					must_free_objs.add(new WeakReference<>(this.chan_manager));
+					// If we are using a ChannelManagerConstructor, we may have pending events waiting on the old peer
+					// which have been removed from the ChannelManager but which we still need to handle.
+					this.pending_manager_events.addAll(orig.pending_manager_events);
+					if (!this.pending_manager_events.isEmpty()) {
+						// However, this implies cross_reload_ref_pollution
+						cross_reload_ref_pollution = true;
+					}
+				} catch (ChannelManagerConstructor.InvalidSerializedDataException e) {
+					assert false;
+				}
+			} else {
+				ChannelMonitor[] monitors = new ChannelMonitor[1];
+				assert orig.monitors.size() == 1;
+				if (!break_cross_peer_refs) {
+					monitors[0] = orig.monitors.values().stream().iterator().next();
+				} else {
+					byte[] serialized = orig.monitors.values().stream().iterator().next().write();
+					Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ res =
+							UtilMethods.constructor_BlockHashChannelMonitorZ_read(serialized, this.keys_interface);
+					assert res instanceof Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK;
+					monitors[0] = ((Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ.Result_C2Tuple_BlockHashChannelMonitorZDecodeErrorZ_OK) res).res.b;
+				}
+				byte[] serialized = orig.chan_manager.write();
+				Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ read_res =
+						UtilMethods.constructor_BlockHashChannelManagerZ_read(serialized, this.keys_interface, this.fee_estimator, this.chain_watch, this.tx_broadcaster, this.logger, UserConfig.constructor_default(), monitors);
+				assert read_res instanceof Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK;
+				this.chan_manager = ((Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ.Result_C2Tuple_BlockHashChannelManagerZDecodeErrorZ_OK) read_res).res.b;
+				this.chain_watch.watch_channel(monitors[0].get_funding_txo().a, monitors[0]);
+				if (!break_cross_peer_refs && (use_manual_watch || use_km_wrapper)) {
+					// When we pass monitors[0] into chain_watch.watch_channel we create a reference from the new Peer to a
+					// field in the old peer, preventing freeing of the original Peer until the new Peer is freed. Thus, we
+					// shouldn't bother waiting for the original to be freed later on.
+					cross_reload_ref_pollution = true;
+				}
+			}
+			this.node_id = chan_manager.get_our_node_id();
+
+			if (cross_reload_ref_pollution) {
+				// This really, really needs to be handled at the bindings layer, but it's rather complicated -
+				// ChannelSigners can be cloned and passed around without Java being involved, resulting in them being
+				// owned by both one or more ChannelMonitors and a ChannelManager, with only one having proper pointers
+				// to the ChannelSigner. Ideally, the ChannelSigner would have a global reference to the Java
+				// implementation class, but that results in circular references. Instead, we need some ability to,
+				// while cloning ChannelSigners, add new references in the calling Java struct (ie ChannelMonitor) to
+				// the ChannelSigner.
+				this.ptr_to = orig.chan_manager;
+			}
+
+			byte[] random_data = new byte[32];
+			for (byte i = 0; i < 32; i++) {
+				random_data[i] = (byte) ((i ^ seed) ^ 0xf0);
+			}
+			this.peer_manager = PeerManager.constructor_new(chan_manager.as_ChannelMessageHandler(), router.as_RoutingMessageHandler(), keys_interface.get_node_secret(), random_data, logger);
+			bind_nio();
+		}
+
+		TwoTuple<byte[], TwoTuple<Integer, TxOut>[]>[] connect_block(Block b, int height, long expected_monitor_update_len) {
 			byte[] header = Arrays.copyOfRange(b.bitcoinSerialize(), 0, 80);
-			long txn;
-			if (t != null)
-				txn = bindings.LDKCVecTempl_C2TupleTempl_usize__Transaction_new(
-						new long[]{bindings.C2Tuple_usizeTransactionZ_new(1, bindings.new_txpointer_copy_data(t.bitcoinSerialize()))});
-			else
-				txn = bindings.LDKCVecTempl_C2TupleTempl_usize__Transaction_new(new long[0]);
-			bindings.ChannelManager_block_connected(chan_manager._test_only_get_ptr(), header, txn, height);
-			synchronized (monitors) {
-				for (Long mon : monitors.values()) {
-					if (t != null)
-						txn = bindings.LDKCVecTempl_C2TupleTempl_usize__Transaction_new(
-								new long[]{bindings.C2Tuple_usizeTransactionZ_new(1, bindings.new_txpointer_copy_data(t.bitcoinSerialize()))});
-					else
-						txn = bindings.LDKCVecTempl_C2TupleTempl_usize__Transaction_new(new long[0]);
-					long ret = bindings.ChannelMonitor_block_connected(mon, header, txn, height, tx_broadcaster, fee_estimator, logger);
-					bindings.CVec_C2Tuple_TxidCVec_TxOutZZZ_free(ret);
+			TwoTuple<Long, byte[]>[] txn;
+			if (b.hasTransactions()) {
+				assert b.getTransactions().size() == 1;
+				TwoTuple<Long, byte[]> txp = new TwoTuple<>((long) 0, b.getTransactions().get(0).bitcoinSerialize());
+				txn = new TwoTuple[]{txp};
+			} else
+				txn = new TwoTuple[0];
+			if (chain_monitor != null) {
+				chan_manager.as_Listen().block_connected(b.bitcoinSerialize(), height);
+				chain_monitor.as_Listen().block_connected(b.bitcoinSerialize(), height);
+			} else {
+				chan_manager.as_Confirm().transactions_confirmed(header, txn, height);
+				chan_manager.as_Confirm().best_block_updated(header, height);
+				// Connect manually if we aren't using a ChainMonitor and are implementing Watch ourselves
+				synchronized (monitors) {
+					assert monitors.size() == 1;
+					for (ChannelMonitor mon : monitors.values()) {
+						TwoTuple<byte[], TwoTuple<Integer, TxOut>[]>[] ret = mon.block_connected(header, txn, height, tx_broadcaster, fee_estimator, logger);
+						assert ret.length == expected_monitor_update_len;
+						return ret;
+					}
 				}
 			}
+			return null;
 		}
-		void free() {
-			// Note that we can't rely on finalizer order, so don't bother trying to rely on it here
-			bindings.Logger_free(logger);
-			bindings.FeeEstimator_free(fee_estimator);
-			bindings.BroadcasterInterface_free(tx_broadcaster);
-			bindings.ChannelMessageHandler_free(chan_handler);
-			bindings.NetGraphMsgHandler_free(router);
-			bindings.RoutingMessageHandler_free(route_handler);
-			//MessageHandler was actually moved into the route_handler!: bindings.MessageHandler_free(message_handler);
-			bindings.PeerManager_free(peer_manager);
-			synchronized (monitors) {
-				for (Long mon : monitors.values()) {
-					bindings.ChannelMonitor_free(mon);
+		Event[] get_monitor_events(int expected_len) {
+			if (use_chan_manager_constructor) {
+				while (true) {
+					synchronized (this.pending_manager_events) {
+						if (expected_len != 0 && this.pending_manager_events.size() != expected_len) {
+							break;
+						}
+					}
+					try { Thread.sleep(500); } catch (InterruptedException e) { assert false; }
+					break;
+				}
+				synchronized (this.pending_manager_events) {
+					Event[] res = this.pending_manager_events.toArray(new Event[0]);
+					this.pending_manager_events.clear();
+					assert res.length == expected_len;
+					return res;
+				}
+			} else if (chain_monitor != null) {
+				return chain_monitor.as_EventsProvider().get_and_clear_pending_events();
+			} else {
+				synchronized (monitors) {
+					assert monitors.size() == 1;
+					for (ChannelMonitor mon : monitors.values()) {
+						Event[] res = mon.get_and_clear_pending_events();
+						assert res.length == expected_len;
+						return res;
+					}
+					return null;
 				}
 			}
 		}
+
+		Event[] get_manager_events(int expected_len) {
+			Event[] res = new Event[0];
+			if (use_chan_manager_constructor) {
+				while (res.length < expected_len) {
+					synchronized (this.pending_manager_events) {
+						res = this.pending_manager_events.toArray(res);
+						assert res.length == expected_len || res.length == 0; // We don't handle partial results
+						this.pending_manager_events.clear();
+						if (res.length < expected_len) {
+							try { this.pending_manager_events.wait(); } catch (InterruptedException e) { assert false; }
+						}
+					}
+				}
+			} else {
+				res = this.chan_manager.as_EventsProvider().get_and_clear_pending_events();
+			}
+			assert res.length == expected_len;
+			return res;
+		}
+
+		Route get_route(byte[] dest_node, ChannelDetails[] our_chans) {
+			try (LockedNetworkGraph netgraph = this.router.read_locked_graph()) {
+				NetworkGraph graph = netgraph.graph();
+				long res = bindings.get_route(this.node_id, graph._test_only_get_ptr(), dest_node, 0L, new long[]{our_chans[0]._test_only_get_ptr()},
+						new long[0], 1000000, 42, this.logger._test_only_get_ptr());
+				assert bindings.LDKCResult_RouteLightningErrorZ_result_ok(res);
+				byte[] serialized_route = bindings.Route_write(bindings.LDKCResult_RouteLightningErrorZ_get_ok(res));
+				must_free_objs.add(new WeakReference<>(serialized_route));
+				Result_RouteDecodeErrorZ copy = Route.constructor_read(serialized_route);
+				assert copy instanceof Result_RouteDecodeErrorZ.Result_RouteDecodeErrorZ_OK;
+				bindings.CResult_RouteLightningErrorZ_free(res);
+				return ((Result_RouteDecodeErrorZ.Result_RouteDecodeErrorZ_OK) copy).res;
+			}
+		}
 	}
-	class LongHolder { long val; }
-
-	java.util.LinkedList<WeakReference> must_free_objs = new java.util.LinkedList();
-	void do_read_event(ConcurrentLinkedQueue<Thread> list, long pm, long descriptor, byte[] data) {
-		Thread thread = new Thread(() -> {
-			long res = bindings.PeerManager_read_event(pm, descriptor, data);
-			assert bindings.LDKCResult_boolPeerHandleErrorZ_result_ok(res);
-			//assert bindings.deref_bool(bindings.LDKCResult_boolPeerHandleErrorZ_get_inner(res));
-			bindings.CResult_boolPeerHandleErrorZ_free(res);
-		});
-		thread.start();
-		list.add(thread);
-		must_free_objs.add(new WeakReference(data));
+	static class DescriptorHolder { SocketDescriptor val; }
+
+	boolean running = false;
+	final LinkedList<Runnable> runqueue = new LinkedList();
+	boolean ran = false;
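+	// A single background thread which runs queued read_event calls in order, roughly emulating the
+	// message processing the NioPeerHandler would otherwise do when the test drives sockets by hand.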
+	Thread t = new Thread(() -> {
+		while (true) {
+			try {
+				Runnable r;
+				synchronized (runqueue) {
+					while (runqueue.isEmpty()) {
+						runqueue.wait();
+					}
+					running = true;
+					r = runqueue.pollFirst();
+				}
+				r.run();
+				synchronized (runqueue) {
+					running = false;
+					runqueue.notifyAll();
+				}
+			} catch (InterruptedException e) {
+				return;
+			}
+		}
+	});
+	void wait_events_processed(Peer peer1, Peer peer2) {
+		if (use_nio_peer_handler) {
+			peer1.nio_peer_handler.check_events();
+			peer2.nio_peer_handler.check_events();
+			try { Thread.sleep(400); } catch (InterruptedException e) { assert false; }
+		} else {
+			synchronized (runqueue) {
+				ran = false;
+			}
+			while (true) {
+				peer1.peer_manager.process_events();
+				peer2.peer_manager.process_events();
+				synchronized (runqueue) {
+					if (runqueue.isEmpty() && !running) {
+						if (ran) {
+							ran = false;
+							continue;
+						} else { break; }
+					}
+					try { runqueue.wait(); } catch (InterruptedException e) { assert false; }
+				}
+			}
+		}
+	}
+	void do_read_event(PeerManager pm, SocketDescriptor descriptor, byte[] data) {
+		if (!t.isAlive()) t.start();
+		synchronized (runqueue) {
+			ran = true;
+			runqueue.add(() -> {
+				Result_boolPeerHandleErrorZ res = pm.read_event(descriptor, data);
+				assert res instanceof Result_boolPeerHandleErrorZ.Result_boolPeerHandleErrorZ_OK;
+			});
+			runqueue.notifyAll();
+		}
+		must_free_objs.add(new WeakReference<>(data));
 	}
-	boolean gc_ran = false;
-	class GcCheck {
-		@Override
-		protected void finalize() throws Throwable {
-			gc_ran = true;
-			super.finalize();
+	void connect_peers(final Peer peer1, final Peer peer2) {
+		if (use_nio_peer_handler) {
+			try {
+				peer1.nio_peer_handler.connect(peer2.chan_manager.get_our_node_id(), new InetSocketAddress("127.0.0.1", peer2.nio_port), 100);
+			} catch (IOException e) { assert false; }
+		} else {
+			DescriptorHolder descriptor1 = new DescriptorHolder();
+			DescriptorHolder descriptor1ref = descriptor1;
+			SocketDescriptor descriptor2 = SocketDescriptor.new_impl(new SocketDescriptor.SocketDescriptorInterface() {
+				@Override
+				public long send_data(byte[] data, boolean resume_read) {
+					do_read_event(peer1.peer_manager, descriptor1ref.val, data);
+					return data.length;
+				}
+
+				@Override public void disconnect_socket() { assert false; }
+				@Override public boolean eq(SocketDescriptor other_arg) { return other_arg.hash() == 2; }
+				@Override public long hash() { return 2; }
+			});
+
+			descriptor1.val = SocketDescriptor.new_impl(new SocketDescriptor.SocketDescriptorInterface() {
+				@Override
+				public long send_data(byte[] data, boolean resume_read) {
+					do_read_event(peer2.peer_manager, descriptor2, data);
+					return data.length;
+				}
+
+				@Override public void disconnect_socket() { assert false; }
+				@Override public boolean eq(SocketDescriptor other_arg) { return other_arg.hash() == 1; }
+				@Override public long hash() { return 1; }
+			});
+
+			Result_CVec_u8ZPeerHandleErrorZ conn_res = peer1.peer_manager.new_outbound_connection(peer2.node_id, descriptor1.val);
+			assert conn_res instanceof Result_CVec_u8ZPeerHandleErrorZ.Result_CVec_u8ZPeerHandleErrorZ_OK;
+
+			Result_NonePeerHandleErrorZ inbound_conn_res = peer2.peer_manager.new_inbound_connection(descriptor2);
+			assert inbound_conn_res instanceof Result_NonePeerHandleErrorZ.Result_NonePeerHandleErrorZ_OK;
+			do_read_event(peer2.peer_manager, descriptor2, ((Result_CVec_u8ZPeerHandleErrorZ.Result_CVec_u8ZPeerHandleErrorZ_OK) conn_res).res);
 		}
 	}
-	void do_test_message_handler() throws InterruptedException {
-		GcCheck obj = new GcCheck();
+
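+	// Connects the two peers, opens and funds a 10k-sat channel, confirms it on-chain, then pays an
+	// invoice from peer1 to peer2, optionally reloading both peers from serialized state at the end.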
+	TestState do_test_message_handler() throws InterruptedException {
 		Peer peer1 = new Peer((byte) 1);
 		Peer peer2 = new Peer((byte) 2);
-		ConcurrentLinkedQueue<Thread> list = new ConcurrentLinkedQueue<Thread>();
-		LongHolder descriptor1 = new LongHolder();
-		LongHolder descriptor1ref = descriptor1;
-		bindings.LDKSocketDescriptor sock1 = new bindings.LDKSocketDescriptor() {
-			@Override
-			public long send_data(byte[] data, boolean resume_read) {
-				do_read_event(list, peer1.peer_manager, descriptor1ref.val, data);
-				return data.length;
-			}
+		connect_peers(peer1, peer2);
+		wait_events_processed(peer1, peer2);
 
-			@Override public void disconnect_socket() { assert false; }
-			@Override public boolean eq(long other_arg) { return bindings.LDKSocketDescriptor_get_obj_from_jcalls(other_arg).hash() == 2; }
-			@Override public long hash() { return 2; }
-		};
-		long descriptor2 = bindings.LDKSocketDescriptor_new(sock1);
-
-		bindings.LDKSocketDescriptor sock2 = new bindings.LDKSocketDescriptor() {
-			@Override
-			public long send_data(byte[] data, boolean resume_read) {
-				do_read_event(list, peer2.peer_manager, descriptor2, data);
-				return data.length;
-			}
+		Result_NoneAPIErrorZ cc_res = peer1.chan_manager.create_channel(peer2.node_id, 10000, 1000, 42, null);
+		assert cc_res instanceof Result_NoneAPIErrorZ.Result_NoneAPIErrorZ_OK;
+		wait_events_processed(peer1, peer2);
 
-			@Override public void disconnect_socket() { assert false; }
-			@Override public boolean eq(long other_arg) { return bindings.LDKSocketDescriptor_get_obj_from_jcalls(other_arg).hash() == 1; }
-			@Override public long hash() { return 1; }
-		};
-		descriptor1.val = bindings.LDKSocketDescriptor_new(sock2);
-
-		long init_vec = bindings.PeerManager_new_outbound_connection(peer1.peer_manager, peer2.node_id, descriptor1.val);
-		assert (bindings.LDKCResult_CVec_u8ZPeerHandleErrorZ_result_ok(init_vec));
-
-		long con_res = bindings.PeerManager_new_inbound_connection(peer2.peer_manager, descriptor2);
-		assert (bindings.LDKCResult_NonePeerHandleErrorZ_result_ok(con_res));
-		bindings.CResult_NonePeerHandleErrorZ_free(con_res);
-		do_read_event(list, peer2.peer_manager, descriptor2, bindings.get_u8_slice_bytes(bindings.LDKCResult_CVec_u8ZPeerHandleErrorZ_get_inner(init_vec)));
-		bindings.CResult_CVec_u8ZPeerHandleErrorZ_free(init_vec);
-
-		while (!list.isEmpty()) { list.poll().join(); }
-
-		long cc_res = bindings.ChannelManager_create_channel(peer1.chan_manager._test_only_get_ptr(), peer2.node_id, 10000, 1000, 42, 0);
-		assert bindings.LDKCResult_NoneAPIErrorZ_result_ok(cc_res);
-		bindings.CResult_NoneAPIErrorZ_free(cc_res);
-
-		bindings.PeerManager_process_events(peer1.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-		bindings.PeerManager_process_events(peer2.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-
-		long events = bindings.EventsProvider_call_get_and_clear_pending_events(peer1.chan_manager_events._test_only_get_ptr());
-		bindings.VecOrSliceDef events_arr_info = bindings.LDKCVecTempl_Event_arr_info(events);
-		assert events_arr_info.datalen == 1;
-		bindings.LDKEvent event = bindings.LDKEvent_ref_from_ptr(events_arr_info.dataptr);
-		assert event instanceof bindings.LDKEvent.FundingGenerationReady;
-		assert ((bindings.LDKEvent.FundingGenerationReady) event).channel_value_satoshis == 10000;
-		assert ((bindings.LDKEvent.FundingGenerationReady) event).user_channel_id == 42;
-		byte[] funding_spk = bindings.get_u8_slice_bytes(((bindings.LDKEvent.FundingGenerationReady) event).output_script);
+		Event[] events = peer1.get_manager_events(1);
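+		// create_channel should have produced exactly one FundingGenerationReady event telling us to
+		// build and hand back the funding transaction.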
+		assert events[0] instanceof Event.FundingGenerationReady;
+		assert ((Event.FundingGenerationReady) events[0]).channel_value_satoshis == 10000;
+		assert ((Event.FundingGenerationReady) events[0]).user_channel_id == 42;
+		byte[] funding_spk = ((Event.FundingGenerationReady) events[0]).output_script;
 		assert funding_spk.length == 34 && funding_spk[0] == 0 && funding_spk[1] == 32; // P2WSH
-		byte[] chan_id = ((bindings.LDKEvent.FundingGenerationReady) event).temporary_channel_id;
-		bindings.CVec_EventZ_free(events);
+		byte[] chan_id = ((Event.FundingGenerationReady) events[0]).temporary_channel_id;
+
+		NetworkParameters bitcoinj_net = NetworkParameters.fromID(NetworkParameters.ID_MAINNET);
 
-		Transaction funding = new Transaction(NetworkParameters.fromID(NetworkParameters.ID_MAINNET));
-		funding.addInput(new TransactionInput(NetworkParameters.fromID(NetworkParameters.ID_MAINNET), funding, new byte[0]));
+		Transaction funding = new Transaction(bitcoinj_net);
+		funding.addInput(new TransactionInput(bitcoinj_net, funding, new byte[0]));
 		funding.getInputs().get(0).setWitness(new TransactionWitness(2)); // Make sure we don't complain about lack of witness
 		funding.getInput(0).getWitness().setPush(0, new byte[]{0x1});
 		funding.addOutput(Coin.SATOSHI.multiply(10000), new Script(funding_spk));
-		peer1.chan_manager.funding_transaction_generated(chan_id, new OutPoint(funding.getTxId().getReversedBytes(), (short) 0));
-
-		bindings.PeerManager_process_events(peer1.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-		bindings.PeerManager_process_events(peer2.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
+		Result_NoneAPIErrorZ funding_res = peer1.chan_manager.funding_transaction_generated(chan_id, funding.bitcoinSerialize());
+		assert funding_res instanceof Result_NoneAPIErrorZ.Result_NoneAPIErrorZ_OK;
+		wait_events_processed(peer1, peer2);
 
-		events = bindings.EventsProvider_call_get_and_clear_pending_events(peer1.chan_manager_events._test_only_get_ptr());
-		events_arr_info = bindings.LDKCVecTempl_Event_arr_info(events);
-		assert events_arr_info.datalen == 1;
-		event = bindings.LDKEvent_ref_from_ptr(events_arr_info.dataptr);
-		assert event instanceof bindings.LDKEvent.FundingBroadcastSafe;
-		bindings.CVec_EventZ_free(events);
+		assert peer1.broadcast_set.size() == 1;
+		assert Arrays.equals(peer1.broadcast_set.get(0), funding.bitcoinSerialize());
+		peer1.broadcast_set.clear();
 
-		Block b = new Block(NetworkParameters.fromID(NetworkParameters.ID_MAINNET), 2, Sha256Hash.ZERO_HASH, Sha256Hash.ZERO_HASH, 42, 0, 0, Arrays.asList(new Transaction[]{funding}));
-		peer1.connect_block(b, funding, 1);
-		peer2.connect_block(b, funding, 1);
+		Block b = new Block(bitcoinj_net, 2, Sha256Hash.ZERO_HASH, Sha256Hash.ZERO_HASH, 42, 0, 0, Arrays.asList(new Transaction[]{funding}));
+		peer1.connect_block(b, 1, 0);
+		peer2.connect_block(b, 1, 0);
 		for (int height = 2; height < 10; height++) {
-			b = new Block(NetworkParameters.fromID(NetworkParameters.ID_MAINNET), 2, b.getHash(), Sha256Hash.ZERO_HASH, 42, 0, 0, Arrays.asList(new Transaction[]{funding}));
-			peer1.connect_block(b, null, height);
-			peer2.connect_block(b, null, height);
+			b = new Block(bitcoinj_net, 2, b.getHash(), Sha256Hash.ZERO_HASH, 42, 0, 0, Arrays.asList(new Transaction[0]));
+			peer1.connect_block(b, height, 0);
+			peer2.connect_block(b, height, 0);
+		}
+		wait_events_processed(peer1, peer2);
+
+		peer1.chan_manager.list_channels();
+		ChannelDetails[] peer1_chans = peer1.chan_manager.list_usable_channels();
+		ChannelDetails[] peer2_chans = peer2.chan_manager.list_usable_channels();
+		assert peer1_chans.length == 1;
+		assert peer2_chans.length == 1;
+		assert peer1_chans[0].get_channel_value_satoshis() == 10000;
+		assert peer1_chans[0].get_is_live();
+		Option_u64Z short_chan_id = peer1_chans[0].get_short_channel_id();
+		assert short_chan_id instanceof Option_u64Z.Some;
+		assert ((Option_u64Z.Some)short_chan_id).some == (1L << 40); // 0th output in the 0th transaction in the 1st block
+		assert Arrays.equals(peer1_chans[0].get_channel_id(), funding.getTxId().getReversedBytes());
+		assert Arrays.equals(peer2_chans[0].get_channel_id(), funding.getTxId().getReversedBytes());
+
+		Result_InvoiceSignOrCreationErrorZ invoice = UtilMethods.constructor_invoice_from_channelmanager(peer2.chan_manager, peer2.keys_interface, LDKCurrency.LDKCurrency_Bitcoin, Option_u64Z.constructor_none(), "Invoice Description");
+		assert invoice instanceof Result_InvoiceSignOrCreationErrorZ.Result_InvoiceSignOrCreationErrorZ_OK;
+		System.out.println("Got invoice: " + ((Result_InvoiceSignOrCreationErrorZ.Result_InvoiceSignOrCreationErrorZ_OK) invoice).res.to_str());
+		Result_InvoiceNoneZ parsed_invoice = Invoice.constructor_from_str(((Result_InvoiceSignOrCreationErrorZ.Result_InvoiceSignOrCreationErrorZ_OK) invoice).res.to_str());
+		assert parsed_invoice instanceof Result_InvoiceNoneZ.Result_InvoiceNoneZ_OK;
+		assert Arrays.equals(((Result_InvoiceNoneZ.Result_InvoiceNoneZ_OK) parsed_invoice).res.payment_hash(), ((Result_InvoiceSignOrCreationErrorZ.Result_InvoiceSignOrCreationErrorZ_OK) invoice).res.payment_hash());
+		SignedRawInvoice signed_raw = ((Result_InvoiceNoneZ.Result_InvoiceNoneZ_OK) parsed_invoice).res.into_signed_raw();
+		RawInvoice raw_invoice = signed_raw.raw_invoice();
+		byte[] desc_hash = raw_invoice.hash();
+		Description raw_invoice_description = raw_invoice.description();
+		String description_string = raw_invoice_description.into_inner();
+		assert description_string.equals("Invoice Description");
+		byte[] payment_hash = ((Result_InvoiceSignOrCreationErrorZ.Result_InvoiceSignOrCreationErrorZ_OK) invoice).res.payment_hash();
+		byte[] payment_secret = ((Result_InvoiceSignOrCreationErrorZ.Result_InvoiceSignOrCreationErrorZ_OK) invoice).res.payment_secret();
+
+		Route route = peer1.get_route(peer2.node_id, peer1_chans);
+		Result_NonePaymentSendFailureZ payment_res = peer1.chan_manager.send_payment(route, payment_hash, payment_secret);
+		assert payment_res instanceof Result_NonePaymentSendFailureZ.Result_NonePaymentSendFailureZ_OK;
+		wait_events_processed(peer1, peer2);
+
+		RouteHop[][] hops = new RouteHop[1][1];
+		byte[] hop_pubkey = new byte[33];
+		hop_pubkey[0] = 3;
+		hop_pubkey[1] = 42;
+		hops[0][0] = RouteHop.constructor_new(hop_pubkey, NodeFeatures.constructor_known(), 42, ChannelFeatures.constructor_known(), 100, 0);
+		Route r2 = Route.constructor_new(hops);
+		payment_res = peer1.chan_manager.send_payment(r2, payment_hash, payment_secret);
+		assert payment_res instanceof Result_NonePaymentSendFailureZ.Result_NonePaymentSendFailureZ_Err;
+
+		if (!use_chan_manager_constructor) {
+			peer1.get_monitor_events(0);
+			peer2.get_monitor_events(0);
+		} else {
+			// The events are combined across manager + monitors but peer1 still has no events
+		}
-		bindings.PeerManager_process_events(peer1.peer_manager);
-		bindings.PeerManager_process_events(peer2.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-
-		long peer1_chans = bindings.ChannelManager_list_channels(peer1.chan_manager._test_only_get_ptr());
-		long peer2_chans = bindings.ChannelManager_list_channels(peer2.chan_manager._test_only_get_ptr());
-		assert bindings.vec_slice_len(peer1_chans) == 1;
-		assert bindings.vec_slice_len(peer2_chans) == 1;
-		long[] peer_1_chan_info = bindings.LDKCVecTempl_ChannelDetails_arr_info(peer1_chans);
-		assert peer_1_chan_info.length == 1;
-		assert bindings.ChannelDetails_get_channel_value_satoshis(peer_1_chan_info[0]) == 10000;
-		assert bindings.ChannelDetails_get_is_live(peer_1_chan_info[0]);
-		assert Arrays.equals(bindings.ChannelDetails_get_channel_id(peer_1_chan_info[0]), funding.getTxId().getReversedBytes());
-		assert Arrays.equals(bindings.ChannelDetails_get_channel_id(bindings.LDKCVecTempl_ChannelDetails_arr_info(peer2_chans)[0]), funding.getTxId().getReversedBytes());
-		bindings.CVec_ChannelDetailsZ_free(peer2_chans);
-
-		byte[] payment_preimage = new byte[32];
-		for (int i = 0; i < 32; i++) payment_preimage[i] = (byte) (i ^ 0x0f);
-		byte[] payment_hash = Sha256Hash.hash(payment_preimage);
-		long netgraph = bindings.NetGraphMsgHandler_read_locked_graph(peer1.router);
-		long route = bindings.get_route(peer1.node_id, bindings.LockedNetworkGraph_graph(netgraph), peer2.node_id, peer1_chans,
-				bindings.LDKCVecTempl_RouteHint_new(new long[0]), 1000, 42, peer1.logger);
-		bindings.CVec_ChannelDetailsZ_free(peer1_chans);
-		assert bindings.LDKCResult_RouteLightningErrorZ_result_ok(route);
-		bindings.LockedNetworkGraph_free(netgraph);
-		long payment_res = bindings.ChannelManager_send_payment(peer1.chan_manager._test_only_get_ptr(), bindings.LDKCResult_RouteLightningErrorZ_get_inner(route), payment_hash, new byte[32]);
-		bindings.CResult_RouteLightningErrorZ_free(route);
-		assert bindings.LDKCResult_NonePaymentSendFailureZ_result_ok(payment_res);
-		bindings.CResult_NonePaymentSendFailureZ_free(payment_res);
-
-		bindings.PeerManager_process_events(peer1.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-		bindings.PeerManager_process_events(peer2.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-		bindings.PeerManager_process_events(peer1.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-
-		long peer2_events = bindings.EventsProvider_call_get_and_clear_pending_events(peer2.chan_manager_events._test_only_get_ptr());
-		bindings.VecOrSliceDef event_arr_info = bindings.LDKCVecTempl_Event_arr_info(peer2_events);
-		assert event_arr_info.datalen == 1;
-		bindings.LDKEvent forwardable = bindings.LDKEvent_ref_from_ptr(event_arr_info.dataptr);
-		assert forwardable instanceof bindings.LDKEvent.PendingHTLCsForwardable;
-		bindings.CVec_EventZ_free(peer2_events);
-		bindings.ChannelManager_process_pending_htlc_forwards(peer2.chan_manager._test_only_get_ptr());
-
-		peer2_events = bindings.EventsProvider_call_get_and_clear_pending_events(peer2.chan_manager_events._test_only_get_ptr());
-		event_arr_info = bindings.LDKCVecTempl_Event_arr_info(peer2_events);
-		assert event_arr_info.datalen == 1;
-		bindings.LDKEvent payment_recvd = bindings.LDKEvent_ref_from_ptr(event_arr_info.dataptr);
-		assert payment_recvd instanceof bindings.LDKEvent.PaymentReceived;
-		peer2.chan_manager.claim_funds(payment_preimage, new byte[32], ((bindings.LDKEvent.PaymentReceived) payment_recvd).amt);
-		bindings.CVec_EventZ_free(peer2_events);
-
-		bindings.PeerManager_process_events(peer2.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-		bindings.PeerManager_process_events(peer1.peer_manager);
-		while (!list.isEmpty()) { list.poll().join(); }
-
-		long peer1_events = bindings.EventsProvider_call_get_and_clear_pending_events(peer1.chan_manager_events._test_only_get_ptr());
-		event_arr_info = bindings.LDKCVecTempl_Event_arr_info(peer1_events);
-		assert event_arr_info.datalen == 1;
-		bindings.LDKEvent sent = bindings.LDKEvent_ref_from_ptr(event_arr_info.dataptr);
-		assert sent instanceof bindings.LDKEvent.PaymentSent;
-		assert Arrays.equals(((bindings.LDKEvent.PaymentSent) sent).payment_preimage, payment_preimage);
-		bindings.CVec_EventZ_free(peer1_events);
-
-		peer1.free();
-		peer2.free();
-		bindings.SocketDescriptor_free(descriptor2);
-		bindings.SocketDescriptor_free(descriptor1.val);
+		if (reload_peers) {
+			if (use_nio_peer_handler) {
+				peer1.nio_peer_handler.interrupt();
+				peer2.nio_peer_handler.interrupt();
+			}
+			if (use_chan_manager_constructor) {
+				peer1.constructor.interrupt();
+				peer2.constructor.interrupt();
+			}
+			WeakReference<Peer> op1 = new WeakReference<>(peer1);
+			peer1 = new Peer(peer1);
+			peer2 = new Peer(peer2);
+			return new TestState(op1, peer1, peer2, b.getHash());
+		}
+		return new TestState(null, peer1, peer2, b.getHash());
 	}
 
-	@Test
-	public void test_message_handler() throws InterruptedException {
-		do_test_message_handler();
-		while (!gc_ran) {
+	boolean cross_reload_ref_pollution = false;
+	class TestState {
+		private final WeakReference<Peer> ref_block;
+		private final Peer peer1;
+		private final Peer peer2;
+		public Sha256Hash best_blockhash;
+
+		public TestState(WeakReference<Peer> ref_block, Peer peer1, Peer peer2, Sha256Hash best_blockhash) {
+			this.ref_block = ref_block;
+			this.peer1 = peer1;
+			this.peer2 = peer2;
+			this.best_blockhash = best_blockhash;
+		}
+	}
+	void do_test_message_handler_b(TestState state) {
+		GcCheck obj = new GcCheck();
+		if (state.ref_block != null) {
+			// Ensure the original peers get freed before we move on. Note that we have to be in a different function
+			// scope to do so as the (at least current OpenJDK) JRE won't release anything created in the same scope.
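+			// When cross_reload_ref_pollution is set the new Peer intentionally keeps references into the
+			// old one alive, so the original can never be collected and we must not spin waiting on it.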
+			while (!cross_reload_ref_pollution && state.ref_block.get() != null) {
+				System.gc();
+				System.runFinalization();
+			}
+			connect_peers(state.peer1, state.peer2);
+		}
+		wait_events_processed(state.peer1, state.peer2);
+
+		Event[] events = state.peer2.get_manager_events(1);
+		assert events[0] instanceof Event.PendingHTLCsForwardable;
+		state.peer2.chan_manager.process_pending_htlc_forwards();
+
+		events = state.peer2.get_manager_events(1);
+		assert events[0] instanceof Event.PaymentReceived;
+		byte[] payment_preimage = ((Event.PaymentReceived)events[0]).payment_preimage;
+		assert !Arrays.equals(payment_preimage, new byte[32]);
+		state.peer2.chan_manager.claim_funds(payment_preimage);
+		wait_events_processed(state.peer1, state.peer2);
+
+		events = state.peer1.get_manager_events(1);
+		assert events[0] instanceof Event.PaymentSent;
+		assert Arrays.equals(((Event.PaymentSent) events[0]).payment_preimage, payment_preimage);
+		wait_events_processed(state.peer1, state.peer2);
+
+		ChannelDetails[] peer1_chans = state.peer1.chan_manager.list_channels();
+
+		if (nice_close) {
+			Result_NoneAPIErrorZ close_res = state.peer1.chan_manager.close_channel(peer1_chans[0].get_channel_id());
+			assert close_res instanceof Result_NoneAPIErrorZ.Result_NoneAPIErrorZ_OK;
+			wait_events_processed(state.peer1, state.peer2);
+
+			assert state.peer1.broadcast_set.size() == 1;
+			assert state.peer2.broadcast_set.size() == 1;
+		} else {
+			state.peer1.chan_manager.force_close_all_channels();
+			wait_events_processed(state.peer1, state.peer2);
+
+			assert state.peer1.broadcast_set.size() == 1;
+			assert state.peer2.broadcast_set.size() == 1;
+
+			NetworkParameters bitcoinj_net = NetworkParameters.fromID(NetworkParameters.ID_MAINNET);
+			Transaction tx = new Transaction(bitcoinj_net, state.peer1.broadcast_set.getFirst());
+			Block b = new Block(bitcoinj_net, 2, state.best_blockhash, Sha256Hash.ZERO_HASH, 42, 0, 0,
+					Arrays.asList(new Transaction[]{tx}));
+			TwoTuple<byte[], TwoTuple<Integer, TxOut>[]>[] watch_outputs = state.peer2.connect_block(b, 10, 1);
+			if (watch_outputs != null) { // We only process watch_outputs manually when we use a manually-built Watch impl
+				assert watch_outputs.length == 1;
+				assert Arrays.equals(watch_outputs[0].a, tx.getTxId().getReversedBytes());
+				assert watch_outputs[0].b.length == 2;
+				assert watch_outputs[0].b[0].a == 0;
+				assert watch_outputs[0].b[1].a == 1;
+			}
+
+			for (int i = 11; i < 21; i++) {
+				b = new Block(bitcoinj_net, 2, b.getHash(), Sha256Hash.ZERO_HASH, 42, 0, 0, new ArrayList<>());
+				state.peer2.connect_block(b, i, 0);
+			}
+
+			Event[] broadcastable_event = state.peer2.get_monitor_events(1);
+			for (ChannelMonitor mon : state.peer2.monitors.values()) {
+				// This used to be buggy and double-free, so go ahead and fetch them!
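+				// (Note that this call also passes our Java-implemented Logger back across the FFI boundary.)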
+				byte[][] txn = mon.get_latest_holder_commitment_txn(state.peer2.logger);
+			}
+			assert broadcastable_event.length == 1;
+			assert broadcastable_event[0] instanceof Event.SpendableOutputs;
+			if (state.peer2.explicit_keys_manager != null) {
+				Result_TransactionNoneZ tx_res = state.peer2.explicit_keys_manager.spend_spendable_outputs(((Event.SpendableOutputs) broadcastable_event[0]).outputs, new TxOut[0], new byte[] {0x00}, 253);
+				assert tx_res instanceof Result_TransactionNoneZ.Result_TransactionNoneZ_OK;
+				Transaction built_tx = new Transaction(bitcoinj_net, ((Result_TransactionNoneZ.Result_TransactionNoneZ_OK) tx_res).res);
+				assert built_tx.getOutputs().size() == 1;
+				assert Arrays.equals(built_tx.getOutput(0).getScriptBytes(), new byte[]{0x00});
+			}
+		}
+
+		if (use_nio_peer_handler) {
+			state.peer1.peer_manager.disconnect_by_node_id(state.peer2.chan_manager.get_our_node_id(), false);
+			wait_events_processed(state.peer1, state.peer2);
+			assert state.peer1.peer_manager.get_peer_node_ids().length == 0;
+			assert state.peer2.peer_manager.get_peer_node_ids().length == 0;
+			state.peer1.nio_peer_handler.interrupt();
+			state.peer2.nio_peer_handler.interrupt();
+		}
+
+		state.peer1.get_monitor_events(0);
+		state.peer2.get_monitor_events(0);
+
+		if (use_chan_manager_constructor) {
+			state.peer1.constructor.interrupt();
+			state.peer2.constructor.interrupt();
+		}
+	}
+
+	java.util.LinkedList<WeakReference<Object>> must_free_objs = new java.util.LinkedList();
+	int gc_count = 0;
+	int gc_exp_count = 0;
+	class GcCheck {
+		GcCheck() { gc_exp_count += 1; }
+		@Override
+		protected void finalize() throws Throwable {
+			gc_count += 1;
+			super.finalize();
+		}
+	}
+}
+public class HumanObjectPeerTest {
+	HumanObjectPeerTestInstance do_test_run(boolean nice_close, boolean use_km_wrapper, boolean use_manual_watch, boolean reload_peers, boolean break_cross_peer_refs, boolean nio_peer_handler, boolean use_chan_manager_constructor) throws InterruptedException {
+		HumanObjectPeerTestInstance instance = new HumanObjectPeerTestInstance(nice_close, use_km_wrapper, use_manual_watch, reload_peers, break_cross_peer_refs, nio_peer_handler, !nio_peer_handler, use_chan_manager_constructor);
+		HumanObjectPeerTestInstance.TestState state = instance.do_test_message_handler();
+		instance.do_test_message_handler_b(state);
+		return instance;
+	}
+	void do_test(boolean nice_close, boolean use_km_wrapper, boolean use_manual_watch, boolean reload_peers, boolean break_cross_peer_refs, boolean nio_peer_handler, boolean use_chan_manager_constructor) throws InterruptedException {
+		HumanObjectPeerTestInstance instance = do_test_run(nice_close, use_km_wrapper, use_manual_watch, reload_peers, break_cross_peer_refs, nio_peer_handler, use_chan_manager_constructor);
+		while (instance.gc_count != instance.gc_exp_count) {
 			System.gc();
 			System.runFinalization();
 		}
-		for (WeakReference o : must_free_objs)
+		for (WeakReference o : instance.must_free_objs)
 			assert o.get() == null;
 	}
+	@Test
+	public void test_message_handler() throws InterruptedException {
+		for (int i = 0; i < (1 << 7) - 1; i++) {
+			boolean nice_close =                   (i & (1 << 0)) != 0;
+			boolean use_km_wrapper =               (i & (1 << 1)) != 0;
+			boolean use_manual_watch =             (i & (1 << 2)) != 0;
+			boolean reload_peers =                 (i & (1 << 3)) != 0;
+			boolean break_cross_refs =             (i & (1 << 4)) != 0;
+			boolean nio_peer_handler =             (i & (1 << 5)) != 0;
+			boolean use_chan_manager_constructor = (i & (1 << 6)) != 0;
+			if (break_cross_refs && !reload_peers) {
+				// There are no cross refs to break without reloading peers.
+				continue;
+			}
+			if (use_chan_manager_constructor && use_manual_watch) {
+				// ChannelManagerConstructor requires a ChainMonitor as the Watch
+				continue;
+			}
+			System.err.println("Running test with flags " + i);
+			do_test(nice_close, use_km_wrapper, use_manual_watch, reload_peers, break_cross_refs, nio_peer_handler, use_chan_manager_constructor);
+		}
+	}
 }