8074119: [AARCH64] stage repo misses fixes from several Hotspot changes
author adinn
Mon, 02 Mar 2015 10:31:52 -0800
changeset 29195 7d6208ea1775
parent 29194 76f2d51bafc3
child 29213 340e3e8b810b
8074119: [AARCH64] stage repo misses fixes from several Hotspot changes
Summary: add shared code changes from 8059606, 8069230, 8068976, 8068977, 8072911 and 8071805
Reviewed-by: aph, kvn
hotspot/src/cpu/aarch64/vm/aarch64.ad
hotspot/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp
hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
hotspot/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp
hotspot/src/cpu/aarch64/vm/icBuffer_aarch64.cpp
hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp
hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp
hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp
hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.cpp
hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.hpp
hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp
hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp
hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad	Mon Mar 02 10:31:52 2015 -0800
@@ -2341,25 +2341,6 @@
 
   // prefetch encodings
 
-  enc_class aarch64_enc_prefetchr(memory mem) %{
-    MacroAssembler _masm(&cbuf);
-    Register base = as_Register($mem$$base);
-    int index = $mem$$index;
-    int scale = $mem$$scale;
-    int disp = $mem$$disp;
-    if (index == -1) {
-      __ prfm(Address(base, disp), PLDL1KEEP);
-    } else {
-      Register index_reg = as_Register(index);
-      if (disp == 0) {
-        __ prfm(Address(base, index_reg, Address::lsl(scale)), PLDL1KEEP);
-      } else {
-        __ lea(rscratch1, Address(base, disp));
-        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PLDL1KEEP);
-      }
-    }
-  %}
-
   enc_class aarch64_enc_prefetchw(memory mem) %{
     MacroAssembler _masm(&cbuf);
     Register base = as_Register($mem$$base);
@@ -2380,26 +2361,6 @@
     }
   %}
 
-  enc_class aarch64_enc_prefetchnta(memory mem) %{
-    MacroAssembler _masm(&cbuf);
-    Register base = as_Register($mem$$base);
-    int index = $mem$$index;
-    int scale = $mem$$scale;
-    int disp = $mem$$disp;
-    if (index == -1) {
-      __ prfm(Address(base, disp), PSTL1STRM);
-    } else {
-      Register index_reg = as_Register(index);
-      if (disp == 0) {
-        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1STRM);
-        __ nop();
-      } else {
-        __ lea(rscratch1, Address(base, disp));
-        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1STRM);
-      }
-    }
-  %}
-
   enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
     MacroAssembler _masm(&cbuf);
     Register cnt_reg = as_Register($cnt$$reg);
@@ -5887,18 +5848,7 @@
 // prefetch instructions
 // Must be safe to execute with invalid address (cannot fault).
 
-instruct prefetchr( memory mem ) %{
-  match(PrefetchRead mem);
-
-  ins_cost(INSN_COST);
-  format %{ "prfm $mem, PLDL1KEEP\t# Prefetch into level 1 cache read keep" %}
-
-  ins_encode( aarch64_enc_prefetchr(mem) );
-
-  ins_pipe(iload_prefetch);
-%}
-
-instruct prefetchw( memory mem ) %{
+instruct prefetchalloc( memory mem ) %{
   match(PrefetchAllocation mem);
 
   ins_cost(INSN_COST);
@@ -5909,17 +5859,6 @@
   ins_pipe(iload_prefetch);
 %}
 
-instruct prefetchnta( memory mem ) %{
-  match(PrefetchWrite mem);
-
-  ins_cost(INSN_COST);
-  format %{ "prfm $mem, PSTL1STRM\t# Prefetch into level 1 cache write streaming" %}
-
-  ins_encode( aarch64_enc_prefetchnta(mem) );
-
-  ins_pipe(iload_prefetch);
-%}
-
 //  ---------------- volatile loads and stores ----------------
 
 // Load Byte (8 bit signed)
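
The prefetch hunks above track the shared-code removal of the PrefetchRead and PrefetchWrite ideal nodes: only PrefetchAllocation is still matched, so the instruct is renamed prefetchalloc and the two orphaned encodings are dropped. For orientation, a sketch of the surviving encoding's address-mode selection, assuming the retained aarch64_enc_prefetchw body (truncated at the @@ -2380 hunk boundary) mirrors the removed encodings with the PSTL1KEEP hint:

    // Sketch, not verbatim from this diff: the retained prefetch-for-allocation
    // encoding; the PSTL1KEEP hint is an assumption.
    MacroAssembler _masm(&cbuf);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;
    if (index == -1) {
      __ prfm(Address(base, disp), PSTL1KEEP);             // base + immediate
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
      } else {
        __ lea(rscratch1, Address(base, disp));            // fold disp first
        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
      }
    }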
--- a/hotspot/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/c1_CodeStubs_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -364,16 +364,6 @@
   __ b(_continuation);
 }
 
-jbyte* G1PostBarrierStub::_byte_map_base = NULL;
-
-jbyte* G1PostBarrierStub::byte_map_base_slow() {
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
-         "Must be if we're using this.");
-  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
-}
-
-
 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(addr()->is_register(), "Precondition.");
--- a/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -1057,12 +1057,6 @@
 }
 
 
-void LIR_Assembler::prefetchr(LIR_Opr src) { Unimplemented(); }
-
-
-void LIR_Assembler::prefetchw(LIR_Opr src) { Unimplemented(); }
-
-
 int LIR_Assembler::array_element_size(BasicType type) const {
   int elem_size = type2aelembytes(type);
   return exact_log2(elem_size);
--- a/hotspot/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/c1_MacroAssembler_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -34,6 +34,7 @@
 #include "runtime/basicLock.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 
 void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
--- a/hotspot/src/cpu/aarch64/vm/icBuffer_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/icBuffer_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -32,7 +32,6 @@
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
 #include "oops/oop.inline.hpp"
-#include "oops/oop.inline2.hpp"
 
 int InlineCacheBuffer::ic_stub_code_size() {
   return (MacroAssembler::far_branches() ? 6 : 4) * NativeInstruction::instruction_size;
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -1409,15 +1409,17 @@
 
 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
-                                                        int increment, int mask,
-                                                        Register scratch, bool preloaded,
-                                                        Condition cond, Label* where) {
+                                                        int increment, Address mask,
+                                                        Register scratch, Register scratch2,
+                                                        bool preloaded, Condition cond,
+                                                        Label* where) {
   if (!preloaded) {
     ldrw(scratch, counter_addr);
   }
   add(scratch, scratch, increment);
   strw(scratch, counter_addr);
-  ands(scratch, scratch, mask);
+  ldrw(scratch2, mask);
+  ands(scratch, scratch, scratch2);
   br(cond, *where);
 }
 
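The new signature is the AArch64 side of making notification masks per-method data: the mask is no longer an immediate baked in at code-generation time but is loaded from an Address on each increment, which costs a second scratch register. In plain C++ the rewritten helper computes roughly the following (a semantics sketch, not VM code; the callers below all pass Assembler::EQ as cond):

    #include <cstdint>

    // Returns true when the branch to *where would be taken (EQ condition).
    bool increment_mask_and_hit(uint32_t* counter_addr, int increment,
                                const uint32_t* mask_addr) {
      uint32_t c = *counter_addr + increment;  // add(scratch, scratch, increment)
      *counter_addr = c;                       // strw(scratch, counter_addr)
      uint32_t m = *mask_addr;                 // ldrw(scratch2, mask) -- the new load
      return (c & m) == 0;                     // ands(...); br(EQ, *where)
    }
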
--- a/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/interp_masm_aarch64.hpp	Mon Mar 02 10:31:52 2015 -0800
@@ -228,9 +228,10 @@
   void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                              bool decrement = false);
   void increment_mask_and_jump(Address counter_addr,
-                               int increment, int mask,
-                               Register scratch, bool preloaded,
-                               Condition cond, Label* where);
+                               int increment, Address mask,
+                               Register scratch, Register scratch2,
+                               bool preloaded, Condition cond,
+                               Label* where);
   void set_mdp_flag_at(Register mdp_in, int flag_constant);
   void test_mdp_data_at(Register mdp_in, int offset, Register value,
                         Register test_value_out,
--- a/hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/methodHandles_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -25,6 +25,7 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
+#include "classfile/javaClasses.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
--- a/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -312,8 +312,6 @@
   ShouldNotCallThis();
 }
 
-bool NativeInstruction::is_dtrace_trap() { return false; }
-
 address NativeCallTrampolineStub::destination(nmethod *nm) const {
   return ptr_at(data_offset);
 }
--- a/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/nativeInst_aarch64.hpp	Mon Mar 02 10:31:52 2015 -0800
@@ -56,7 +56,6 @@
  public:
   enum { instruction_size = 4 };
   inline bool is_nop();
-  bool is_dtrace_trap();
   inline bool is_illegal();
   inline bool is_return();
   bool is_jump();
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -2182,32 +2182,6 @@
 
 }
 
-
-#ifdef HAVE_DTRACE_H
-// ---------------------------------------------------------------------------
-// Generate a dtrace nmethod for a given signature.  The method takes arguments
-// in the Java compiled code convention, marshals them to the native
-// abi and then leaves nops at the position you would expect to call a native
-// function. When the probe is enabled the nops are replaced with a trap
-// instruction that dtrace inserts and the trace will cause a notification
-// to dtrace.
-//
-// The probes are only able to take primitive types and java/lang/String as
-// arguments.  No other java types are allowed. Strings are converted to utf8
-// strings so that from dtrace point of view java strings are converted to C
-// strings. There is an arbitrary fixed limit on the total space that a method
-// can use for converting the strings. (256 chars per string in the signature).
-// So any java string larger then this is truncated.
-
-static int  fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
-static bool offsets_initialized = false;
-
-
-nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
-                                                methodHandle method) { Unimplemented(); return 0; }
-
-#endif // HAVE_DTRACE_H
-
 // this function returns the adjust size (in number of words) to a c2i adapter
 // activation for use during deoptimization
 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
--- a/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/templateInterpreter_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -335,7 +335,6 @@
   // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
   if (TieredCompilation) {
     int increment = InvocationCounter::count_increment;
-    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
     Label no_mdo;
     if (ProfileInterpreter) {
       // Are we profiling?
@@ -344,7 +343,8 @@
       // Increment counter in the MDO
       const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
+      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
       __ b(done);
     }
     __ bind(no_mdo);
@@ -353,9 +353,10 @@
                   MethodCounters::invocation_counter_offset() +
                   InvocationCounter::counter_offset());
     __ get_method_counters(rmethod, rscratch2, done);
-    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
+    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
     __ bind(done);
-  } else {
+  } else { // not TieredCompilation
     const Address backedge_counter(rscratch2,
                   MethodCounters::backedge_counter_offset() +
                   InvocationCounter::counter_offset());
@@ -385,11 +386,9 @@
 
     if (ProfileInterpreter && profile_method != NULL) {
       // Test to see if we should create a method data oop
-      unsigned long offset;
-      __ adrp(rscratch2, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit),
-              offset);
-      __ ldrw(rscratch2, Address(rscratch2, offset));
-      __ cmp(r0, rscratch2);
+      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
+      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
+      __ cmpw(r0, rscratch2);
       __ br(Assembler::LT, *profile_method_continue);
 
       // if no method data exists, go to profile_method
@@ -397,11 +396,8 @@
     }
 
     {
-      unsigned long offset;
-      __ adrp(rscratch2,
-              ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit),
-              offset);
-      __ ldrw(rscratch2, Address(rscratch2, offset));
+      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
+      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
       __ cmpw(r0, rscratch2);
       __ br(Assembler::HS, *overflow);
     }
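
Both limit tests above stop dereferencing the global InvocationCounter::InterpreterProfileLimit / InterpreterInvocationLimit variables via adrp/ldrw and instead read per-method limits from the MethodCounters object reachable from the Method*, with the compare narrowed to 32 bits (cmp becomes cmpw) to match the 32-bit counter. Roughly, in C++ (the accessor name is an assumption inferred from the interpreter_profile_limit_offset() helper used above):

    // Hypothetical rendering of the rewritten profile-limit check.
    bool should_profile(Method* method, int invocation_count) {
      MethodCounters* mcs = method->method_counters();  // ldr rscratch2, [rmethod, ...]
      return invocation_count >= mcs->interpreter_profile_limit();  // assumed accessor
    }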
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Mon Mar 02 10:09:33 2015 -0800
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Mon Mar 02 10:31:52 2015 -0800
@@ -205,7 +205,6 @@
       }
       break;
     case BarrierSet::ModRef:
-    case BarrierSet::Other:
       if (val == noreg) {
         __ store_heap_oop_null(obj);
       } else {
@@ -1650,7 +1649,6 @@
     if (TieredCompilation) {
       Label no_mdo;
       int increment = InvocationCounter::count_increment;
-      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // Are we profiling?
         __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
@@ -1658,16 +1656,18 @@
         // Increment the MDO backedge counter
         const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
                                            in_bytes(InvocationCounter::counter_offset()));
+        const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
-                                   r0, false, Assembler::EQ, &backedge_counter_overflow);
+                                   r0, rscratch1, false, Assembler::EQ, &backedge_counter_overflow);
         __ b(dispatch);
       }
       __ bind(no_mdo);
       // Increment backedge counter in MethodCounters*
       __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
+      const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
-                                 r0, false, Assembler::EQ, &backedge_counter_overflow);
-    } else {
+                                 r0, rscratch2, false, Assembler::EQ, &backedge_counter_overflow);
+    } else { // not TieredCompilation
       // increment counter
       __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
       __ ldrw(r0, Address(rscratch2, be_offset));        // load backedge counter
@@ -1680,8 +1680,7 @@
 
       if (ProfileInterpreter) {
         // Test to see if we should create a method data oop
-        __ lea(rscratch1, ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
-        __ ldrw(rscratch1, rscratch1);
+        __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
         __ cmpw(r0, rscratch1);
         __ br(Assembler::LT, dispatch);
 
@@ -1690,8 +1689,7 @@
 
         if (UseOnStackReplacement) {
           // check for overflow against w1 which is the MDO taken count
-          __ lea(rscratch1, ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
-          __ ldrw(rscratch1, rscratch1);
+          __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ cmpw(r1, rscratch1);
           __ br(Assembler::LO, dispatch); // Intel == Assembler::below
 
@@ -1710,8 +1708,7 @@
         if (UseOnStackReplacement) {
           // check for overflow against w0, which is the sum of the
           // counters
-          __ lea(rscratch1, ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
-          __ ldrw(rscratch1, rscratch1);
+          __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
           __ cmpw(r0, rscratch1);
           __ br(Assembler::HS, backedge_counter_overflow); // Intel == Assembler::aboveEqual
         }
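
The backedge path gets the same treatment: the OSR and profile thresholds now come from MethodCounters fields rather than InvocationCounter globals, and the mask for increment_mask_and_jump is read through MethodData::backedge_mask_offset() or MethodCounters::backedge_mask_offset(). A sketch of the non-tiered OSR test after the change (accessor name assumed, mirroring the offset helper):

    // Hypothetical C++ equivalent of the rewritten on-stack-replacement check;
    // Assembler::HS ("aboveEqual" on Intel) means the limit has been reached.
    bool should_osr(Method* method, int counter_sum) {
      MethodCounters* mcs = method->method_counters();       // held in rscratch2
      return counter_sum >= mcs->interpreter_backward_branch_limit();  // assumed
    }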