+ // Read-ready branch: only proceed if the key is still registered for reads and
+ // the channel actually has data (interestOps may have been cleared to pause reads).
+ if (key.isValid() && (key.interestOps() & SelectionKey.OP_READ) != 0 && key.isReadable()) {
+ // NOTE(review): the (Buffer) cast is presumably the usual JDK 8/9 covariant-return
+ // compatibility workaround (clear()/flip() return ByteBuffer on 9+) — confirm.
+ ((Buffer)buf).clear();
+ int read = ((SocketChannel) key.channel()).read(buf);
+ if (read == -1) {
+ // EOF from the remote peer: tell the peer manager and deregister the key.
+ this.peer_manager.socket_disconnected(peer.descriptor);
+ key.cancel();
+ } else if (read > 0) {
+ ((Buffer)buf).flip();
+ // This code is quite hot during initial network graph sync, so we go a ways out of
+ // our way to avoid object allocations that'll make the GC sweat later -
+ // * when we're hot, we'll likely often be reading the full buffer, so we keep
+ // around a full-buffer-sized byte array to reuse across reads,
+ // * We use the manual memory management call logic directly in bindings instead of
+ // the nice "human-readable" wrappers. This puts us at risk of memory issues,
+ // so we indirectly ensure compile fails if the types change by writing the
+ // "human-readable" form of the same code in the dummy function below.
+ byte[] read_bytes;
+ if (read == BUF_SZ) {
+ // Full-buffer read: reuse the preallocated scratch array instead of allocating.
+ read_bytes = max_buf_byte_object;
+ } else {
+ read_bytes = new byte[read];
+ }
+ buf.get(read_bytes, 0, read);
+ // Raw bindings call returns an owned CResult pointer; we must free it below.
+ long read_result_pointer = bindings.PeerManager_read_event(
+ peer_manager_raw_pointer, peer.descriptor_raw_pointer, read_bytes);
+ if (bindings.CResult_boolPeerHandleErrorZ_is_ok(read_result_pointer)) {
+ if (bindings.CResult_boolPeerHandleErrorZ_get_ok(read_result_pointer)) {
+ // NOTE(review): an Ok(true) result appears to mean "pause reads" — we drop
+ // OP_READ from the interest set until re-enabled elsewhere; confirm against
+ // the PeerManager read_event contract.
+ key.interestOps(key.interestOps() & (~SelectionKey.OP_READ));
+ }
+ } else {
+ // Handler error: close the channel and deregister. No socket_disconnected call
+ // here — presumably the peer manager already knows after an Err; verify.
+ key.channel().close();
+ key.cancel();
+ }
+ // Always release the native result, on both the Ok and Err paths.
+ bindings.CResult_boolPeerHandleErrorZ_free(read_result_pointer);
+ }
+ }
+ } catch (IOException ignored) {
+ // I/O failure on this socket: best-effort close (secondary failure ignored),
+ // deregister the key, and notify the peer manager of the disconnect.
+ try { key.channel().close(); } catch (IOException ignored2) { }
+ key.cancel();
+ peer_manager.socket_disconnected(peer.descriptor);