package org.ldk.batteries;
+import org.ldk.impl.bindings;
import org.ldk.structs.*;
import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.ref.Reference;
import java.util.LinkedList;
import java.net.SocketAddress;
import java.net.StandardSocketOptions;
public class NioPeerHandler {
// Per-connection state tracked by the selector loop: the LDK socket descriptor
// handed to the PeerManager, its raw native pointer, and the NIO selection key.
private static class Peer {
SocketDescriptor descriptor;
// Raw native pointer backing `descriptor`, read once via reflection so the hot
// read path can call the low-level bindings directly without wrapper allocations.
+ long descriptor_raw_pointer;
SelectionKey key;
}
}
}
+ // Reflective handle to the protected "ptr" field on the common superclass of all
+ // LDK-generated wrapper objects. We use it to pull raw native pointers out of
+ // wrappers so hot paths can invoke bindings.* directly (see the read loop below).
+ static private Field CommonBasePointer;
+ static {
+ try {
+ Class c = PeerManager.class.getSuperclass();
+ CommonBasePointer = c.getDeclaredField("ptr");
+ CommonBasePointer.setAccessible(true);
+ // Sanity-check at class-load time that the field is readable on a live wrapper
+ // object, so a bindings-layout change fails fast rather than inside the I/O loop.
+ long _dummy_check = CommonBasePointer.getLong(Ping.of((short)0, (short)0));
+ } catch (NoSuchFieldException | IllegalAccessException e) {
+ throw new IllegalArgumentException(
+ "We currently use reflection to access protected fields as Java has no reasonable access controls", e);
+ }
+ }
+
private Peer setup_socket(SocketChannel chan) throws IOException {
chan.configureBlocking(false);
// Lightning tends to send a number of small messages back and forth between peers quickly, which Nagle is
(peer.key.interestOps() | SelectionKey.OP_READ) & (~SelectionKey.OP_WRITE)));
}
return written;
- } catch (IOException e) {
+ } catch (IOException|CancelledKeyException ignored) {
// Most likely the socket is disconnected, let the background thread handle it.
return 0;
}
public void disconnect_socket() {
try {
do_selector_action(() -> {
- peer.key.cancel();
+ try { peer.key.cancel(); } catch (CancelledKeyException ignored) {}
peer.key.channel().close();
});
} catch (IOException ignored) { }
@Override public long hash() { return our_id; }
});
peer.descriptor = descriptor;
+ try {
+ peer.descriptor_raw_pointer = CommonBasePointer.getLong(descriptor);
+ } catch (IllegalAccessException e) {
+ throw new IllegalArgumentException(
+ "We currently use reflection to access protected fields as Java has no reasonable access controls", e);
+ }
return peer;
}
this.peer_manager = manager;
this.selector = Selector.open();
io_thread = new Thread(() -> {
- ByteBuffer buf = ByteBuffer.allocate(8192);
+ int BUF_SZ = 16 * 1024;
+ byte[] max_buf_byte_object = new byte[BUF_SZ];
+ ByteBuffer buf = ByteBuffer.allocate(BUF_SZ);
+
+ long peer_manager_raw_pointer;
+ try {
+ peer_manager_raw_pointer = CommonBasePointer.getLong(this.peer_manager);
+ } catch (IllegalAccessException e) {
+ throw new RuntimeException(e);
+ }
while (true) {
try {
if (IS_ANDROID) {
if (chan == null) continue;
try {
Peer peer = setup_socket(chan);
+ peer.key = chan.register(this.selector, SelectionKey.OP_READ, peer);
Result_NonePeerHandleErrorZ res = this.peer_manager.new_inbound_connection(peer.descriptor);
- if (res instanceof Result_NonePeerHandleErrorZ.Result_NonePeerHandleErrorZ_OK) {
- peer.key = chan.register(this.selector, SelectionKey.OP_READ, peer);
- }
+ if (res instanceof Result_NonePeerHandleErrorZ.Result_NonePeerHandleErrorZ_Err) {
+ peer.descriptor.disconnect_socket();
+ }
} catch (IOException ignored) { }
}
continue; // There is no attachment so the rest of the loop is useless
if (key.isValid() && (key.interestOps() & SelectionKey.OP_WRITE) != 0 && key.isWritable()) {
Result_NonePeerHandleErrorZ res = this.peer_manager.write_buffer_space_avail(peer.descriptor);
if (res instanceof Result_NonePeerHandleErrorZ.Result_NonePeerHandleErrorZ_Err) {
- key.channel().close();
key.cancel();
+ key.channel().close();
}
}
if (key.isValid() && (key.interestOps() & SelectionKey.OP_READ) != 0 && key.isReadable()) {
if (read == -1) {
this.peer_manager.socket_disconnected(peer.descriptor);
key.cancel();
+ key.channel().close(); // This may throw, we read -1 so the channel should already be closed, but do this to be safe
} else if (read > 0) {
((Buffer)buf).flip();
- byte[] read_bytes = new byte[read];
+ // This code is quite hot during initial network graph sync, so we go a ways out of
+ // our way to avoid object allocations that'll make the GC sweat later -
+ // * when we're hot, we'll likely often be reading the full buffer, so we keep
+ // around a full-buffer-sized byte array to reuse across reads,
+ // * We use the manual memory management call logic directly in bindings instead of
+ // the nice "human-readable" wrappers. This puts us at risk of memory issues,
+ // so we indirectly ensure compile fails if the types change by writing the
+ // "human-readable" form of the same code in the dummy function below.
+ byte[] read_bytes;
+ if (read == BUF_SZ) {
+ read_bytes = max_buf_byte_object;
+ } else {
+ read_bytes = new byte[read];
+ }
buf.get(read_bytes, 0, read);
- Result_boolPeerHandleErrorZ res = this.peer_manager.read_event(peer.descriptor, read_bytes);
- if (res instanceof Result_boolPeerHandleErrorZ.Result_boolPeerHandleErrorZ_OK) {
- if (((Result_boolPeerHandleErrorZ.Result_boolPeerHandleErrorZ_OK) res).res) {
+ long read_result_pointer = bindings.PeerManager_read_event(
+ peer_manager_raw_pointer, peer.descriptor_raw_pointer, read_bytes);
+ if (bindings.CResult_boolPeerHandleErrorZ_is_ok(read_result_pointer)) {
+ if (bindings.CResult_boolPeerHandleErrorZ_get_ok(read_result_pointer)) {
key.interestOps(key.interestOps() & (~SelectionKey.OP_READ));
}
} else {
- key.channel().close();
key.cancel();
+ key.channel().close();
}
+ bindings.CResult_boolPeerHandleErrorZ_free(read_result_pointer);
}
}
} catch (IOException ignored) {
- try { key.channel().close(); } catch (IOException ignored2) { }
key.cancel();
+ try { key.channel().close(); } catch (IOException ignored2) { }
peer_manager.socket_disconnected(peer.descriptor);
}
} catch (CancelledKeyException e) {
io_thread.start();
}
+ // Compile-time guard for the manual bindings.* memory-management code above: this method
+ // repeats the same call through the type-safe wrapper API, so if the wrapper types ever
+ // change, compilation fails here and the manual code above must be updated to match.
+ // Never called at runtime; exists solely so the compiler checks that the wrapper
+ // signature of read_event still matches the raw bindings call used in the read loop.
+ private void dummy_check_return_type_matches_manual_memory_code_above(Peer peer) {
+ byte[] read_bytes = new byte[32];
+ Result_boolPeerHandleErrorZ res = this.peer_manager.read_event(peer.descriptor, read_bytes);
+ }
+
/**
* Connect to a peer given their node id and socket address. Blocks until a connection is established (or returns
* IOException) and then the connection handling runs in the background.
*/
public void connect(byte[] their_node_id, SocketAddress remote, int timeout_ms) throws IOException {
SocketChannel chan = SocketChannel.open();
- chan.configureBlocking(false);
- Selector open_selector = Selector.open();
- chan.register(open_selector, SelectionKey.OP_CONNECT);
- if (!chan.connect(remote)) {
- open_selector.select(timeout_ms);
+ boolean connected;
+ try {
+ // Non-blocking connect with an explicit deadline: register OP_CONNECT on a
+ // throwaway selector and wait at most timeout_ms for the handshake to complete.
+ chan.configureBlocking(false);
+ Selector open_selector = Selector.open();
+ chan.register(open_selector, SelectionKey.OP_CONNECT);
+ if (!chan.connect(remote)) {
+ open_selector.select(timeout_ms);
+ }
+ connected = chan.finishConnect();
+ } catch (IOException e) {
+ // Don't leak the channel if configure/register/select/finishConnect fails.
+ try { chan.close(); } catch (IOException _e) { }
+ throw e;
}
- if (!chan.finishConnect()) { // Note that this may throw its own IOException if we failed for another reason
+ if (!connected) {
+ // finishConnect() returned false: the select() above timed out before the
+ // connection completed, so close the half-open channel before reporting.
+ try { chan.close(); } catch (IOException _e) { }
throw new IOException("Timed out");
}
Peer peer = setup_socket(chan);
+ // Register with our main selector before handing the descriptor to LDK so that
+ // inbound data is picked up as soon as the peer starts talking.
+ do_selector_action(() -> peer.key = chan.register(this.selector, SelectionKey.OP_READ, peer));
Result_CVec_u8ZPeerHandleErrorZ res = this.peer_manager.new_outbound_connection(their_node_id, peer.descriptor);
if (res instanceof Result_CVec_u8ZPeerHandleErrorZ.Result_CVec_u8ZPeerHandleErrorZ_OK) {
byte[] initial_bytes = ((Result_CVec_u8ZPeerHandleErrorZ.Result_CVec_u8ZPeerHandleErrorZ_OK) res).res;
if (chan.write(ByteBuffer.wrap(initial_bytes)) != initial_bytes.length) {
+ // Partial write of the initial handshake bytes: tear down both our socket
+ // state and LDK's view of the connection before failing.
+ peer.descriptor.disconnect_socket();
+ this.peer_manager.socket_disconnected(peer.descriptor);
throw new IOException("We assume TCP socket buffer is at least a single packet in length");
}
- do_selector_action(() -> peer.key = chan.register(this.selector, SelectionKey.OP_READ, peer));
} else {
+ // LDK refused the outbound connection; close the socket we just opened.
+ peer.descriptor.disconnect_socket();
throw new IOException("LDK rejected outbound connection. This likely shouldn't ever happen.");
}
}
}
} catch (IOException ignored) {}
}
+ Reference.reachabilityFence(this.peer_manager); // Almost certainly overkill, but no harm in it
}
/**