--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Thu Oct 17 20:27:44 2019 +0100
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Thu Oct 17 20:53:35 2019 +0100
@@ -34,6 +34,7 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
+#include "oops/klass.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
@@ -1303,6 +1304,97 @@
}
}
+// Registers that need to be saved for a runtime call
+static Register caller_saved_registers[] = {
+ rcx, rdx, rsi, rdi
+};
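+// Note that rax is deliberately absent from this list: it carries the
+// result of the pin call, and gen_unpin_object preserves it explicitly.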
+
+// Save the caller-saved registers, except r1 and r2
+static void save_registers_except(MacroAssembler* masm, Register r1, Register r2) {
+ int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
+ for (int index = 0; index < reg_len; index++) {
+ Register this_reg = caller_saved_registers[index];
+ if (this_reg != r1 && this_reg != r2) {
+ __ push(this_reg);
+ }
+ }
+}
+
+// Restore the caller-saved registers, except r1 and r2
+static void restore_registers_except(MacroAssembler* masm, Register r1, Register r2) {
+ int reg_len = (int)(sizeof(caller_saved_registers) / sizeof(Register));
+ for (int index = reg_len - 1; index >= 0; index--) {
+ Register this_reg = caller_saved_registers[index];
+ if (this_reg != r1 && this_reg != r2) {
+ __ pop(this_reg);
+ }
+ }
+}
+
+// Pin an object; returns the pinned object, or null, in rax
+static void gen_pin_object(MacroAssembler* masm,
+ Register thread, VMRegPair reg) {
+ __ block_comment("gen_pin_object {");
+
+ Label is_null;
+ Register tmp_reg = rax;
+ VMRegPair tmp(tmp_reg->as_VMReg());
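+ // tmp aliases rax, so a null argument falls through to is_null with
+ // rax already holding null, satisfying the contract above.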
+ if (reg.first()->is_stack()) {
+ // Load the argument from the stack
+ simple_move32(masm, reg, tmp);
+ reg = tmp;
+ } else {
+ __ movl(tmp_reg, reg.first()->as_Register());
+ }
+ __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+ __ jccb(Assembler::equal, is_null);
+
+ // Save registers that may be clobbered by the runtime call
+ Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
+ save_registers_except(masm, arg, thread);
+
+ __ call_VM_leaf(
+ CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
+ thread, reg.first()->as_Register());
+
+ // Restore saved registers
+ restore_registers_except(masm, arg, thread);
+
+ __ bind(is_null);
+ __ block_comment("} gen_pin_object");
+}
+
+// Unpin a previously pinned object
+static void gen_unpin_object(MacroAssembler* masm,
+ Register thread, VMRegPair reg) {
+ __ block_comment("gen_unpin_object {");
+ Label is_null;
+
+ // Use rax as a temporary register, preserving its current value
+ __ push(rax);
+ Register tmp_reg = rax;
+ VMRegPair tmp(tmp_reg->as_VMReg());
+
+ simple_move32(masm, reg, tmp);
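+ // rax now holds the pinned oop recorded at pin time; a null value
+ // means the argument was never pinned, so the runtime call is skipped.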
+
+ __ testptr(rax, rax);
+ __ jccb(Assembler::equal, is_null);
+
+ // Save registers that may be clobbered by the runtime call
+ Register arg = reg.first()->is_Register() ? reg.first()->as_Register() : noreg;
+ save_registers_except(masm, arg, thread);
+
+ __ call_VM_leaf(
+ CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
+ thread, rax);
+
+ // Restore saved registers
+ restore_registers_except(masm, arg, thread);
+ __ bind(is_null);
+ __ pop(rax);
+ __ block_comment("} gen_unpin_object");
+}
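+
+// Note: pins and unpins must stay balanced. For register-based array
+// arguments the pinned oop is spilled to a dedicated stack slot at pin
+// time and reloaded before unpinning; for stack-based arguments the
+// pinned oop is written back over the original argument slot.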
+
// Check GCLocker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
@@ -1416,8 +1508,7 @@
Register temp_reg = rbx; // not part of any compiled calling seq
if (VerifyOops) {
for (int i = 0; i < method->size_of_parameters(); i++) {
- if (sig_bt[i] == T_OBJECT ||
- sig_bt[i] == T_ARRAY) {
+ if (is_reference_type(sig_bt[i])) {
VMReg r = regs[i].first();
assert(r->is_valid(), "bad oop arg");
if (r->is_stack()) {
@@ -1524,7 +1615,8 @@
int compile_id,
BasicType* in_sig_bt,
VMRegPair* in_regs,
- BasicType ret_type) {
+ BasicType ret_type,
+ address critical_entry) {
if (method->is_method_handle_intrinsic()) {
vmIntrinsics::ID iid = method->intrinsic_id();
intptr_t start = (intptr_t)__ pc();
@@ -1547,7 +1639,7 @@
(OopMapSet*)NULL);
}
bool is_critical_native = true;
- address native_func = method->critical_native_function();
+ address native_func = critical_entry;
if (native_func == NULL) {
native_func = method->native_function();
is_critical_native = false;
@@ -1836,7 +1928,7 @@
__ get_thread(thread);
- if (is_critical_native) {
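+ // A collector that supports object pinning (e.g. Shenandoah) pins the
+ // array arguments in place instead, so the GCLocker critical-region
+ // protocol is only needed when pinning is unavailable.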
+ if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
check_needs_gc_for_critical_native(masm, thread, stack_slots, total_c_args, total_in_args,
oop_handle_offset, oop_maps, in_regs, in_sig_bt);
}
@@ -1874,6 +1966,11 @@
//
OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
+ // Inbound arguments that need to be pinned for critical natives
+ GrowableArray<int> pinned_args(total_in_args);
+ // Current stack slot for storing register-based array arguments
+ int pinned_slot = oop_handle_offset;
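+ // Pinned oops that arrive in registers are spilled to these slots so
+ // they survive the native call and can be retrieved for unpinning.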
+
// Mark location of rbp,
// map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
@@ -1885,7 +1982,28 @@
switch (in_sig_bt[i]) {
case T_ARRAY:
if (is_critical_native) {
- unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
+ VMRegPair in_arg = in_regs[i];
+ if (Universe::heap()->supports_object_pinning()) {
+ // gen_pin_object handles save and restore
+ // of any clobbered registers
+ gen_pin_object(masm, thread, in_arg);
+ pinned_args.append(i);
+
+ // rax now holds the pinned array
+ VMRegPair result_reg(rax->as_VMReg());
+ if (!in_arg.first()->is_stack()) {
+ assert(pinned_slot <= stack_slots, "overflow");
+ simple_move32(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
+ pinned_slot += VMRegImpl::slots_per_word;
+ } else {
+ // Write back the pinned value; it will be used to unpin this argument later
+ __ movptr(Address(rbp, reg2offset_in(in_arg.first())), result_reg.first()->as_Register());
+ }
+ // The array is now in a register; use it
+ in_arg = result_reg;
+ }
+
+ unpack_array_argument(masm, in_arg, in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
c_arg++;
break;
}
@@ -2078,6 +2196,26 @@
default : ShouldNotReachHere();
}
+ // Unpin the pinned arguments
+ pinned_slot = oop_handle_offset;
+ if (pinned_args.length() > 0) {
+ // Save the return value; it may otherwise be overwritten below.
+ save_native_result(masm, ret_type, stack_slots);
+ for (int index = 0; index < pinned_args.length(); index++) {
+ int i = pinned_args.at(index);
+ assert(pinned_slot <= stack_slots, "overflow");
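+ // Register-based args had their pinned oop spilled to a stack slot at
+ // pin time; reload it. Stack-based args were updated in place.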
+ if (!in_regs[i].first()->is_stack()) {
+ int offset = pinned_slot * VMRegImpl::stack_slot_size;
+ __ movl(in_regs[i].first()->as_Register(), Address(rsp, offset));
+ pinned_slot += VMRegImpl::slots_per_word;
+ }
+ // gen_unpin_object handles save and restore
+ // of any other clobbered registers
+ gen_unpin_object(masm, thread, in_regs[i]);
+ }
+ restore_native_result(masm, ret_type, stack_slots);
+ }
+
// Switch thread to "native transition" state before reading the synchronization state.
// This additional state is necessary because reading and testing the synchronization
// state is not atomic w.r.t. GC, as this scenario demonstrates:
@@ -2217,7 +2355,7 @@
__ reset_last_Java_frame(thread, false);
// Unbox oop result, e.g. JNIHandles::resolve value.
- if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
+ if (is_reference_type(ret_type)) {
__ resolve_jobject(rax /* value */,
thread /* thread */,
rcx /* tmp */);