src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
changeset 51267 2cd8bbccbd2d
parent 49657 45071514f87a
child 51285 1129f9833589
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Tue Jul 31 16:49:51 2018 +0100
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Tue Jul 31 13:12:06 2018 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1434,6 +1434,64 @@
   }
 }
 
+// Pin object, return pinned object or null in rax
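+// The incoming argument may be in a register or on the stack; a null oop
+// skips the runtime call and leaves null in rax.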
+static void gen_pin_object(MacroAssembler* masm,
+                           VMRegPair reg) {
+  __ block_comment("gen_pin_object {");
+
+  // rax always contains the oop, either the incoming one
+  // or the pinned one.
+  Register tmp_reg = rax;
+
+  Label is_null;
+  VMRegPair tmp;
+  VMRegPair in_reg = reg;
+
+  tmp.set_ptr(tmp_reg->as_VMReg());
+  if (reg.first()->is_stack()) {
+    // Load the arg up from the stack
+    move_ptr(masm, reg, tmp);
+    reg = tmp;
+  } else {
+    __ movptr(rax, reg.first()->as_Register());
+  }
+  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
+  __ jccb(Assembler::equal, is_null);
+
+  if (reg.first()->as_Register() != c_rarg1) {
+    __ movptr(c_rarg1, reg.first()->as_Register());
+  }
+
+  __ call_VM_leaf(
+    CAST_FROM_FN_PTR(address, SharedRuntime::pin_object),
+    r15_thread, c_rarg1);
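+  // The leaf call leaves the pinned oop returned by
+  // SharedRuntime::pin_object in rax.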
+
+  __ bind(is_null);
+  __ block_comment("} gen_pin_object");
+}
+
+// Unpin object
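+// The object to unpin is loaded into c_rarg1 from wherever the argument
+// lives; a null oop skips the runtime call.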
+static void gen_unpin_object(MacroAssembler* masm,
+                             VMRegPair reg) {
+  __ block_comment("gen_unpin_object {");
+  Label is_null;
+
+  if (reg.first()->is_stack()) {
+    __ movptr(c_rarg1, Address(rbp, reg2offset_in(reg.first())));
+  } else if (reg.first()->as_Register() != c_rarg1) {
+    __ movptr(c_rarg1, reg.first()->as_Register());
+  }
+
+  __ testptr(c_rarg1, c_rarg1);
+  __ jccb(Assembler::equal, is_null);
+
+  __ call_VM_leaf(
+    CAST_FROM_FN_PTR(address, SharedRuntime::unpin_object),
+    r15_thread, c_rarg1);
+
+  __ bind(is_null);
+  __ block_comment("} gen_unpin_object");
+}
 
 // Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
@@ -2129,7 +2187,7 @@
 
   const Register oop_handle_reg = r14;
 
-  if (is_critical_native) {
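+  // When the heap supports object pinning, array arguments to critical
+  // natives are pinned and unpinned around the call instead, so the
+  // GCLocker check is not needed.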
+  if (is_critical_native && !Universe::heap()->supports_object_pinning()) {
     check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                        oop_handle_offset, oop_maps, in_regs, in_sig_bt);
   }
@@ -2186,6 +2244,11 @@
   // the incoming and outgoing registers are offset upwards and for
   // critical natives they are offset down.
   GrowableArray<int> arg_order(2 * total_in_args);
+  // Inbound arguments that need to be pinned for critical natives
+  GrowableArray<int> pinned_args(total_in_args);
+  // Current stack slot for storing register-based array arguments
+  int pinned_slot = oop_handle_offset;
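+  // For critical natives these slots (starting at oop_handle_offset) are
+  // otherwise unused, since no oop handles are created for the arguments.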
+
   VMRegPair tmp_vmreg;
   tmp_vmreg.set2(rbx->as_VMReg());
 
@@ -2233,6 +2296,23 @@
     switch (in_sig_bt[i]) {
       case T_ARRAY:
         if (is_critical_native) {
+          // Pin the array before unpacking it
+          if (Universe::heap()->supports_object_pinning()) {
+            save_args(masm, total_c_args, 0, out_regs);
+            gen_pin_object(masm, in_regs[i]);
+            pinned_args.append(i);
+            restore_args(masm, total_c_args, 0, out_regs);
+
+            // rax has pinned array
+            VMRegPair result_reg;
+            result_reg.set_ptr(rax->as_VMReg());
+            move_ptr(masm, result_reg, in_regs[i]);
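+            // A register-held array oop will be clobbered by the native
+            // call, so also save the pinned oop into a dedicated stack slot
+            // and reload it from there for unpinning after the call.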
+            if (!in_regs[i].first()->is_stack()) {
+              assert(pinned_slot <= stack_slots, "overflow");
+              move_ptr(masm, result_reg, VMRegImpl::stack2reg(pinned_slot));
+              pinned_slot += VMRegImpl::slots_per_word;
+            }
+          }
           unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
           c_arg++;
 #ifdef ASSERT
@@ -2449,6 +2529,24 @@
   default       : ShouldNotReachHere();
   }
 
+  // Unpin the pinned arguments
+  pinned_slot = oop_handle_offset;
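+  // Walk the pinned arguments in the order they were pinned so the stack
+  // slots holding register-based oops are consumed in the same order.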
+  if (pinned_args.length() > 0) {
+    // Save the return value, which may otherwise be overwritten.
+    save_native_result(masm, ret_type, stack_slots);
+    for (int index = 0; index < pinned_args.length(); index++) {
+      int i = pinned_args.at(index);
+      assert(pinned_slot <= stack_slots, "overflow");
+      if (!in_regs[i].first()->is_stack()) {
+        int offset = pinned_slot * VMRegImpl::stack_slot_size;
+        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
+        pinned_slot += VMRegImpl::slots_per_word;
+      }
+      gen_unpin_object(masm, in_regs[i]);
+    }
+    restore_native_result(masm, ret_type, stack_slots);
+  }
+
   // Switch thread to "native transition" state before reading the synchronization state.
   // This additional state is necessary because reading and testing the synchronization
   // state is not atomic w.r.t. GC, as this scenario demonstrates: