Merge
author adlertz
Thu, 18 Sep 2014 19:30:59 +0200
changeset 26708:605b2e146fa5
parent 26691:40ea2c41f53b (current diff)
parent 26707:92f8c836b168 (diff)
child 26709:87aa4286a4d7
Merge
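
The merged changes retire the InvalidOSREntryBci sentinel: an OSR nmethod is no longer invalidated by overwriting its _entry_bci, so every consumer now reads the nmethod's one-byte _state and only jumps to the OSR code while that state is nmethod::in_use. A minimal, self-contained C++ sketch of the idea follows; FakeNmethod and its fields are illustrative stand-ins, only the state names and is_in_use() mirror the real declarations touched in the hunks below.

    // Sketch only, not HotSpot code: models the new invalidation scheme in
    // which a one-byte state field replaces the old InvalidOSREntryBci
    // poison value. Only the state names and is_in_use() mirror nmethod.hpp.
    #include <cstdint>
    #include <cstdio>

    struct FakeNmethod {
      enum State : int8_t { in_use = 0, not_entrant = 1, zombie = 2, unloaded = 3 };
      int8_t _state = in_use;
      bool is_in_use() const { return _state == in_use; }  // the test the interpreter now makes
    };

    int main() {
      FakeNmethod nm;
      std::printf("OSR entry allowed: %d\n", nm.is_in_use());  // 1 while in_use
      nm._state = FakeNmethod::not_entrant;                    // what invalidation now relies on
      std::printf("OSR entry allowed: %d\n", nm.is_in_use());  // 0 once invalidated
    }
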
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Sep 18 19:30:59 2014 +0200
@@ -107,7 +107,6 @@
   private Runtime1     runtime1;
   /** These constants come from globalDefinitions.hpp */
   private int          invocationEntryBCI;
-  private int          invalidOSREntryBCI;
   private ReversePtrs  revPtrs;
   private VMRegImpl    vmregImpl;
   private int          reserveForAllocationPrefetch;
@@ -295,7 +294,6 @@
 
     stackBias    = db.lookupIntConstant("STACK_BIAS").intValue();
     invocationEntryBCI = db.lookupIntConstant("InvocationEntryBci").intValue();
-    invalidOSREntryBCI = db.lookupIntConstant("InvalidOSREntryBci").intValue();
 
     // We infer the presence of C1 or C2 from a couple of fields we
     // already have present in the type database
@@ -733,11 +731,6 @@
     return invocationEntryBCI;
   }
 
-  /** FIXME: figure out where to stick this */
-  public int getInvalidOSREntryBCI() {
-    return invalidOSREntryBCI;
-  }
-
   // FIXME: figure out where to stick this
   public boolean wizardMode() {
     return true;
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -1166,9 +1166,9 @@
   beq(CCR0, overflow_with_error);
 
   // Has the nmethod been invalidated already?
-  lwz(Rtmp, nmethod::entry_bci_offset(), R3_RET);
-  cmpwi(CCR0, Rtmp, InvalidOSREntryBci);
-  beq(CCR0, overflow_with_error);
+  lbz(Rtmp, nmethod::state_offset(), R3_RET);
+  cmpwi(CCR0, Rtmp, nmethod::in_use);
+  bne(CCR0, overflow_with_error);
 
   // Migrate the interpreter frame off of the stack.
   // We can use all registers because we will not return to interpreter from this point.
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -1674,9 +1674,9 @@
       __ beq(CCR0, Lforward);
 
       // Has the nmethod been invalidated already?
-      __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
-      __ cmpwi(CCR0, R0, InvalidOSREntryBci);
-      __ beq(CCR0, Lforward);
+      __ lbz(R0, nmethod::state_offset(), R3_RET);
+      __ cmpwi(CCR0, R0, nmethod::in_use);
+      __ bne(CCR0, Lforward);
 
       // Migrate the interpreter frame off of the stack.
       // We can use all registers because we will not return to interpreter from this point.
--- a/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/cpu/sparc/vm/interp_masm_sparc.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -2407,8 +2407,8 @@
   br_null_short(O0, Assembler::pn, overflow_with_error);
 
   // Has the nmethod been invalidated already?
-  ld(O0, nmethod::entry_bci_offset(), O2);
-  cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);
+  ldub(O0, nmethod::state_offset(), O2);
+  cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, overflow_with_error);
 
   // migrate the interpreter frame off of the stack
 
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -1636,8 +1636,8 @@
       __ br_null_short(O0, Assembler::pn, Lforward);
 
       // Has the nmethod been invalidated already?
-      __ ld(O0, nmethod::entry_bci_offset(), O2);
-      __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);
+      __ ldub(O0, nmethod::state_offset(), O2);
+      __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);
 
       // migrate the interpreter frame off of the stack
 
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -1724,9 +1724,8 @@
       __ testptr(rax, rax);                      // test result
       __ jcc(Assembler::zero, dispatch);         // no osr if null
       // nmethod may have been invalidated (VM may block upon call_VM return)
-      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
-      __ cmpl(rcx, InvalidOSREntryBci);
-      __ jcc(Assembler::equal, dispatch);
+      __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
+      __ jcc(Assembler::notEqual, dispatch);
 
       // We have the address of an on stack replacement routine in rax,
       // We need to prepare to execute the OSR method. First we must
@@ -1734,8 +1733,7 @@
 
       __ mov(rbx, rax);                             // save the nmethod
 
-      const Register thread = rcx;
-      __ get_thread(thread);
+      __ get_thread(rcx);
       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
       // rax, is OSR buffer, move it to expected parameter location
       __ mov(rcx, rax);
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -1751,9 +1751,8 @@
       __ testptr(rax, rax);                        // test result
       __ jcc(Assembler::zero, dispatch);         // no osr if null
       // nmethod may have been invalidated (VM may block upon call_VM return)
-      __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
-      __ cmpl(rcx, InvalidOSREntryBci);
-      __ jcc(Assembler::equal, dispatch);
+      __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
+      __ jcc(Assembler::notEqual, dispatch);
 
       // We have the address of an on stack replacement routine in eax
       // We need to prepare to execute the OSR method. First we must
--- a/hotspot/src/share/vm/ci/ciField.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciField.hpp	Thu Sep 18 19:30:59 2014 +0200
@@ -39,7 +39,6 @@
   CI_PACKAGE_ACCESS
   friend class ciEnv;
   friend class ciInstanceKlass;
-  friend class NonStaticFieldFiller;
 
 private:
   ciFlags          _flags;
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -44,7 +44,7 @@
 //
 // Loaded instance klass.
 ciInstanceKlass::ciInstanceKlass(KlassHandle h_k) :
-  ciKlass(h_k), _non_static_fields(NULL)
+  ciKlass(h_k)
 {
   assert(get_Klass()->oop_is_instance(), "wrong type");
   assert(get_instanceKlass()->is_loaded(), "must be at least loaded");
@@ -407,37 +407,6 @@
   return field;
 }
 
-// ------------------------------------------------------------------
-// ciInstanceKlass::non_static_fields.
-
-class NonStaticFieldFiller: public FieldClosure {
-  GrowableArray<ciField*>* _arr;
-  ciEnv* _curEnv;
-public:
-  NonStaticFieldFiller(ciEnv* curEnv, GrowableArray<ciField*>* arr) :
-    _curEnv(curEnv), _arr(arr)
-  {}
-  void do_field(fieldDescriptor* fd) {
-    ciField* field = new (_curEnv->arena()) ciField(fd);
-    _arr->append(field);
-  }
-};
-
-GrowableArray<ciField*>* ciInstanceKlass::non_static_fields() {
-  if (_non_static_fields == NULL) {
-    VM_ENTRY_MARK;
-    ciEnv* curEnv = ciEnv::current();
-    InstanceKlass* ik = get_instanceKlass();
-    int max_n_fields = ik->java_fields_count();
-
-    Arena* arena = curEnv->arena();
-    _non_static_fields =
-      new (arena) GrowableArray<ciField*>(arena, max_n_fields, 0, NULL);
-    NonStaticFieldFiller filler(curEnv, _non_static_fields);
-    ik->do_nonstatic_fields(&filler);
-  }
-  return _non_static_fields;
-}
 
 static int sort_field_by_offset(ciField** a, ciField** b) {
   return (*a)->offset_in_bytes() - (*b)->offset_in_bytes();
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp	Thu Sep 18 19:30:59 2014 +0200
@@ -71,8 +71,6 @@
   //   Itsef: more than one implementors.
   ciInstanceKlass*       _implementor;
 
-  GrowableArray<ciField*>* _non_static_fields;
-
 protected:
   ciInstanceKlass(KlassHandle h_k);
   ciInstanceKlass(ciSymbol* name, jobject loader, jobject protection_domain);
@@ -181,8 +179,6 @@
   ciField* get_field_by_offset(int field_offset, bool is_static);
   ciField* get_field_by_name(ciSymbol* name, ciSymbol* signature, bool is_static);
 
-  GrowableArray<ciField*>* non_static_fields();
-
   // total number of nonstatic fields (including inherited):
   int nof_nonstatic_fields() {
     if (_nonstatic_fields == NULL)
--- a/hotspot/src/share/vm/code/nmethod.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -1364,8 +1364,6 @@
   // Remove from list of active nmethods
   if (method() != NULL)
     method()->method_holder()->remove_osr_nmethod(this);
-  // Set entry as invalid
-  _entry_bci = InvalidOSREntryBci;
 }
 
 void nmethod::log_state_change() const {
--- a/hotspot/src/share/vm/code/nmethod.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Thu Sep 18 19:30:59 2014 +0200
@@ -202,13 +202,6 @@
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
 #endif
 
-  enum { in_use       = 0,   // executable nmethod
-         not_entrant  = 1,   // marked for deoptimization but activations may still exist,
-                             // will be transformed to zombie when all activations are gone
-         zombie       = 2,   // no activations exist, nmethod is ready for purge
-         unloaded     = 3 }; // there should be no activations, should not be called,
-                             // will be transformed to zombie immediately
-
   jbyte _scavenge_root_state;
 
 #if INCLUDE_RTM_OPT
@@ -431,6 +424,13 @@
   address entry_point() const                     { return _entry_point;             } // normal entry point
   address verified_entry_point() const            { return _verified_entry_point;    } // if klass is correct
 
+  enum { in_use       = 0,   // executable nmethod
+         not_entrant  = 1,   // marked for deoptimization but activations may still exist,
+                             // will be transformed to zombie when all activations are gone
+         zombie       = 2,   // no activations exist, nmethod is ready for purge
+         unloaded     = 3 }; // there should be no activations, should not be called,
+                             // will be transformed to zombie immediately
+
   // flag accessing and manipulation
   bool  is_in_use() const                         { return _state == in_use; }
   bool  is_alive() const                          { return _state == in_use || _state == not_entrant; }
@@ -759,7 +759,7 @@
   // support for code generation
   static int verified_entry_point_offset()        { return offset_of(nmethod, _verified_entry_point); }
   static int osr_entry_point_offset()             { return offset_of(nmethod, _osr_entry_point); }
-  static int entry_bci_offset()                   { return offset_of(nmethod, _entry_bci); }
+  static int state_offset()                       { return offset_of(nmethod, _state); }
 
   // RedefineClasses support.   Mark metadata in nmethods as on_stack so that
   // redefine classes doesn't purge it.
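
The nmethod.hpp hunk above makes the state enum public and exposes state_offset() so the platform stubs can test the state with a single byte load. As a rough illustration of what the lbz/ldub/cmpb sequences in the earlier hunks compute, here is a self-contained sketch; FakeNmethod, its layout and its state_offset() are assumptions for illustration only, not the real nmethod layout.

    // Sketch only: the template-interpreter stubs now load one byte at
    // state_offset() from the nmethod and compare it against in_use, e.g.
    //   cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use)
    // FakeNmethod below is an illustrative stand-in with an invented layout.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct FakeNmethod {
      enum { in_use = 0, not_entrant = 1, zombie = 2, unloaded = 3 };
      void*  _other_fields;   // placeholder for everything that precedes _state
      int8_t _state;
      static size_t state_offset() { return offsetof(FakeNmethod, _state); }
    };

    int main() {
      FakeNmethod nm;
      nm._other_fields = nullptr;
      nm._state = FakeNmethod::in_use;
      // The byte load the stubs perform, written out in C++:
      const int8_t state =
          *(reinterpret_cast<const int8_t*>(&nm) + FakeNmethod::state_offset());
      assert(state == FakeNmethod::in_use);  // OSR entry is taken only in this case
      return 0;
    }
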
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -330,7 +330,7 @@
         if (do_OSR) {                                                                               \
           nmethod* osr_nmethod;                                                                     \
           OSR_REQUEST(osr_nmethod, branch_pc);                                                      \
-          if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) {          \
+          if (osr_nmethod != NULL && osr_nmethod->is_in_use()) {                                    \
             intptr_t* buf;                                                                          \
             /* Call OSR migration with last java frame only, no checks. */                          \
             CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD));                      \
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Sep 18 19:30:59 2014 +0200
@@ -2518,7 +2518,6 @@
   /*********************************************/                         \
                                                                           \
   declare_constant(InvocationEntryBci)                                    \
-  declare_constant(InvalidOSREntryBci)                                    \
                                                                           \
   /***************/                                                       \
   /* OopMapValue */                                                       \
--- a/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Tue Sep 16 12:13:13 2014 +0200
+++ b/hotspot/src/share/vm/utilities/globalDefinitions.hpp	Thu Sep 18 19:30:59 2014 +0200
@@ -882,8 +882,7 @@
 
 // Handy constants for deciding which compiler mode to use.
 enum MethodCompilation {
-  InvocationEntryBci = -1,     // i.e., not a on-stack replacement compilation
-  InvalidOSREntryBci = -2
+  InvocationEntryBci = -1     // i.e., not a on-stack replacement compilation
 };
 
 // Enumeration to distinguish tiers of compilation
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/osr/TestRangeCheck.java	Thu Sep 18 19:30:59 2014 +0200
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestRangeCheck
+ * @bug 8054883
+ * @summary Tests that range check is not skipped
+ */
+
+public class TestRangeCheck {
+    public static void main(String args[]) {
+        try {
+            test();
+            throw new AssertionError("Expected ArrayIndexOutOfBoundsException was not thrown");
+        } catch (ArrayIndexOutOfBoundsException e) {
+            System.out.println("Expected ArrayIndexOutOfBoundsException was thrown");
+        }
+    }
+
+    private static void test() {
+        int arr[] = new int[1];
+        int result = 1;
+
+        // provoke OSR compilation
+        for (int i = 0; i < Integer.MAX_VALUE; i++) {
+        }
+
+        if (result > 0 && arr[~result] > 0) {
+            arr[~result] = 0;
+        }
+    }
+}