Merge
author never
Mon, 09 Aug 2010 17:51:56 -0700
changeset 6184 a017b5ba6782
parent 6176 4d9030fe341f (current diff)
parent 6183 4c74cfe14f20 (diff)
child 6185 1d2b3c0c9674
Merge
hotspot/src/share/vm/asm/codeBuffer.hpp
hotspot/src/share/vm/memory/allocation.cpp
hotspot/src/share/vm/memory/allocation.hpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/thread.cpp
hotspot/src/share/vm/runtime/thread.hpp
hotspot/src/share/vm/utilities/vmError.cpp
--- a/hotspot/make/solaris/makefiles/sparcWorks.make	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/make/solaris/makefiles/sparcWorks.make	Mon Aug 09 17:51:56 2010 -0700
@@ -145,11 +145,20 @@
 OPT_CFLAGS/O2=-xO2
 OPT_CFLAGS/NOOPT=-xO1
 
+#################################################
+# Begin current (>=5.9) Forte compiler options #
+#################################################
+
 ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
 ifeq ($(Platform_arch), x86)
 OPT_CFLAGS/NO_TAIL_CALL_OPT  = -Wu,-O~yz
 OPT_CCFLAGS/NO_TAIL_CALL_OPT = -Qoption ube -O~yz
+OPT_CFLAGS/stubGenerator_x86_32.o = $(OPT_CFLAGS) -xspace
+OPT_CFLAGS/stubGenerator_x86_64.o = $(OPT_CFLAGS) -xspace
 endif # Platform_arch == x86
+ifeq ("${Platform_arch}", "sparc")
+OPT_CFLAGS/stubGenerator_sparc.o = $(OPT_CFLAGS) -xspace
+endif
 endif # COMPILER_REV_NUMERIC >= 509
 
 #################################################
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -7568,21 +7568,27 @@
 
   // Scan RCX words at [RDI] for an occurrence of RAX.
   // Set NZ/Z based on last compare.
+  // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' itself
+  // does not change flags (only the repeated scas instruction sets them).
+  // Set Z = 0 (not equal) before 'repne' to indicate that the class was not found.
 #ifdef _LP64
   // This part is tricky, as values in supers array could be 32 or 64 bit wide
   // and we store values in objArrays always encoded, thus we need to encode
   // the value of rax before repne.  Note that rax is dead after the repne.
   if (UseCompressedOops) {
-    encode_heap_oop_not_null(rax);
+    encode_heap_oop_not_null(rax); // Changes flags.
     // The superclass is never null; it would be a basic system error if a null
     // pointer were to sneak in here.  Note that we have already loaded the
     // Klass::super_check_offset from the super_klass in the fast path,
     // so if there is a null in that register, we are already in the afterlife.
+    testl(rax,rax); // Set Z = 0
     repne_scanl();
   } else
 #endif // _LP64
+  {
+    testptr(rax,rax); // Set Z = 0
     repne_scan();
-
+  }
   // Unspill the temp. registers:
   if (pushed_rdi)  pop(rdi);
   if (pushed_rcx)  pop(rcx);
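The hazard the seeding fixes: 'repne scas' with RCX == 0 executes zero iterations and leaves the flags exactly as they were, so the branch after the scan would act on stale flags. A minimal C-style analogue of the fix, with a plain loop standing in for repne scasq (names are illustrative, not HotSpot code):

    #include <stdint.h>

    // 'found' plays the role of the Z flag: it must be seeded to "not equal"
    // before the loop, because a zero-length scan (RCX == 0) never executes
    // scas and therefore never touches it. rax is assumed non-null here,
    // matching the "superclass is never null" invariant in the patch.
    static int scan_for(const uint64_t* p, uint64_t count, uint64_t value) {
        int found = 0;                   // testptr(rax, rax): seed Z = 0
        while (count-- > 0) {            // repne: repeat while not equal
            found = (*p++ == value);     // scas: compare and set flags
            if (found) break;
        }
        return found;                    // the caller's jcc reads this "flag"
    }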
@@ -8257,30 +8263,35 @@
   }
 }
 
+#ifdef ASSERT
+void MacroAssembler::verify_heapbase(const char* msg) {
+  assert (UseCompressedOops, "should be compressed");
+  assert (Universe::heap() != NULL, "java heap should be initialized");
+  if (CheckCompressedOops) {
+    Label ok;
+    push(rscratch1); // cmpptr trashes rscratch1
+    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
+    jcc(Assembler::equal, ok);
+    stop(msg);
+    bind(ok);
+    pop(rscratch1);
+  }
+}
+#endif
+
 // Algorithm must match oop.inline.hpp encode_heap_oop.
 void MacroAssembler::encode_heap_oop(Register r) {
-  assert (UseCompressedOops, "should be compressed");
-  assert (Universe::heap() != NULL, "java heap should be initialized");
+#ifdef ASSERT
+  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
+#endif
+  verify_oop(r, "broken oop in encode_heap_oop");
   if (Universe::narrow_oop_base() == NULL) {
-    verify_oop(r, "broken oop in encode_heap_oop");
     if (Universe::narrow_oop_shift() != 0) {
       assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
       shrq(r, LogMinObjAlignmentInBytes);
     }
     return;
   }
-#ifdef ASSERT
-  if (CheckCompressedOops) {
-    Label ok;
-    push(rscratch1); // cmpptr trashes rscratch1
-    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
-    jcc(Assembler::equal, ok);
-    stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
-    bind(ok);
-    pop(rscratch1);
-  }
-#endif
-  verify_oop(r, "broken oop in encode_heap_oop");
   testq(r, r);
   cmovq(Assembler::equal, r, r12_heapbase);
   subq(r, r12_heapbase);
@@ -8288,9 +8299,8 @@
 }
 
 void MacroAssembler::encode_heap_oop_not_null(Register r) {
-  assert (UseCompressedOops, "should be compressed");
-  assert (Universe::heap() != NULL, "java heap should be initialized");
 #ifdef ASSERT
+  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
   if (CheckCompressedOops) {
     Label ok;
     testq(r, r);
@@ -8310,9 +8320,8 @@
 }
 
 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
-  assert (UseCompressedOops, "should be compressed");
-  assert (Universe::heap() != NULL, "java heap should be initialized");
 #ifdef ASSERT
+  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
   if (CheckCompressedOops) {
     Label ok;
     testq(src, src);
@@ -8335,40 +8344,21 @@
 }
 
 void  MacroAssembler::decode_heap_oop(Register r) {
-  assert (UseCompressedOops, "should be compressed");
-  assert (Universe::heap() != NULL, "java heap should be initialized");
+#ifdef ASSERT
+  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
+#endif
   if (Universe::narrow_oop_base() == NULL) {
     if (Universe::narrow_oop_shift() != 0) {
       assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
       shlq(r, LogMinObjAlignmentInBytes);
     }
-    verify_oop(r, "broken oop in decode_heap_oop");
-    return;
-  }
-#ifdef ASSERT
-  if (CheckCompressedOops) {
-    Label ok;
-    push(rscratch1);
-    cmpptr(r12_heapbase,
-           ExternalAddress((address)Universe::narrow_oop_base_addr()));
-    jcc(Assembler::equal, ok);
-    stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
-    bind(ok);
-    pop(rscratch1);
-  }
-#endif
-
-  Label done;
-  shlq(r, LogMinObjAlignmentInBytes);
-  jccb(Assembler::equal, done);
-  addq(r, r12_heapbase);
-#if 0
-   // alternate decoding probably a wash.
-   testq(r, r);
-   jccb(Assembler::equal, done);
-   leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
-#endif
-  bind(done);
+  } else {
+    Label done;
+    shlq(r, LogMinObjAlignmentInBytes);
+    jccb(Assembler::equal, done);
+    addq(r, r12_heapbase);
+    bind(done);
+  }
   verify_oop(r, "broken oop in decode_heap_oop");
 }
 
@@ -8410,9 +8400,11 @@
         addq(dst, r12_heapbase);
       }
     }
-  } else if (dst != src) {
+  } else {
     assert (Universe::narrow_oop_base() == NULL, "sanity");
-    movq(dst, src);
+    if (dst != src) {
+      movq(dst, src);
+    }
   }
 }
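For reference, the encode/decode pair being restructured above implements null-preserving compressed oops: encode substitutes the heap base for NULL (the cmovq) so the subtraction yields zero, and decode shifts first and adds the base only for a non-zero result. A self-contained sketch with made-up base and shift values (HotSpot reads the real ones from Universe::):

    #include <cassert>
    #include <cstdint>

    static const uintptr_t heap_base = 0x100000000ULL; // narrow_oop_base (illustrative)
    static const int       oop_shift = 3;              // LogMinObjAlignmentInBytes

    // encode_heap_oop: cmovq(equal, r, r12_heapbase) replaces NULL with the
    // base so (r - base) >> shift maps NULL to 0.
    static uint32_t encode(uintptr_t oop) {
        uintptr_t p = (oop == 0) ? heap_base : oop;
        return (uint32_t)((p - heap_base) >> oop_shift);
    }

    // decode_heap_oop: shift first, add the base only when non-zero,
    // mirroring the shlq / jccb(equal, done) / addq sequence above.
    static uintptr_t decode(uint32_t narrow) {
        uintptr_t p = (uintptr_t)narrow << oop_shift;
        if (p != 0) p += heap_base;
        return p;
    }

    int main() {
        assert(decode(encode(0)) == 0);
        uintptr_t oop = heap_base + 0x12340;   // 8-byte aligned heap address
        assert(decode(encode(oop)) == oop);
        return 0;
    }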
 
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -1714,6 +1714,9 @@
 
   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
+
+  DEBUG_ONLY(void verify_heapbase(const char* msg);)
+
 #endif // _LP64
 
   // Int division/remainder for Java
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -128,7 +128,11 @@
   delete _overflow_arena;
 
 #ifdef ASSERT
+  // Save the allocation type so the assert in ~ResourceObj(),
+  // which runs after this destructor, still sees a valid value.
+  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
   Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
+  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
 #endif
 }
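The fix relies on member destructors running after the enclosing destructor's body: the fill_to_bytes zap would otherwise destroy the tag that ~ResourceObj() checks. A trimmed-down sketch of the same save/zap/restore dance, with Tagged and Owner as stand-ins for OopRecorder and CodeBuffer:

    #include <cstring>
    #include <cstdint>

    struct Tagged { uintptr_t _allocation; };       // stand-in for a ResourceObj

    struct Owner {
        Tagged _member;                             // like _default_oop_recorder
        ~Owner() {
            uintptr_t saved = _member._allocation;  // save the tag
            std::memset(this, 0xAB, sizeof(*this)); // debug zap (badResourceValue)
            _member._allocation = saved;            // ~Tagged() runs next and checks it
        }
    };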
 
--- a/hotspot/src/share/vm/asm/codeBuffer.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/asm/codeBuffer.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -278,7 +278,7 @@
   // special case during expansion which is handled internally.  This
   // is done to guarantee proper cleanup of resources.
   void* operator new(size_t size) { return ResourceObj::operator new(size); }
-  void  operator delete(void* p)  {        ResourceObj::operator delete(p); }
+  void  operator delete(void* p)  { ShouldNotCallThis(); }
 
  public:
   typedef int csize_t;  // code size type; would be size_t except for history
--- a/hotspot/src/share/vm/ci/ciField.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/ci/ciField.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -339,7 +339,7 @@
   if (_type != NULL) _type->print_name();
   else               tty->print("(reference)");
   tty->print(" is_constant=%s", bool_to_str(_is_constant));
-  if (_is_constant) {
+  if (_is_constant && is_static()) {
     tty->print(" constant_value=");
     _constant_value.print();
   }
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -403,8 +403,9 @@
     instanceKlass* ik = get_instanceKlass();
     int max_n_fields = ik->fields()->length()/instanceKlass::next_offset;
 
+    Arena* arena = curEnv->arena();
     _non_static_fields =
-      new (curEnv->arena()) GrowableArray<ciField*>(max_n_fields);
+      new (arena) GrowableArray<ciField*>(arena, max_n_fields, 0, NULL);
     NonStaticFieldFiller filler(curEnv, _non_static_fields);
     ik->do_nonstatic_fields(&filler);
   }
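The pattern applied here (and at the ciMethodBlocks, chaitin, and compile call sites below) hands the arena both to placement new and to the constructor, so the array header and its element storage live in the same place and the new consistency asserts in growableArray.hpp hold. A toy sketch with Arena and Array as stand-ins:

    #include <cstdlib>
    #include <cstddef>

    struct Arena {                                   // toy stand-in, never frees
        void* Amalloc(size_t n) { return std::malloc(n); }
    };

    template <typename E>
    struct Array {
        E*  _data;
        int _max, _len;
        // Arena-placed header plus arena-placed elements: the two agree.
        Array(Arena* a, int max, int len, E fill)
            : _data((E*)a->Amalloc(max * sizeof(E))), _max(max), _len(len) {
            for (int i = 0; i < _len; i++) _data[i] = fill;
        }
        void* operator new(size_t size, Arena* a) { return a->Amalloc(size); }
    };

    int main() {
        Arena arena;
        Array<int>* blocks = new (&arena) Array<int>(&arena, 8, 0, 0);
        return blocks->_max == 8 ? 0 : 1;
    }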
--- a/hotspot/src/share/vm/ci/ciMethodBlocks.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/ci/ciMethodBlocks.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -252,7 +252,7 @@
                           _arena(arena), _num_blocks(0), _code_size(meth->code_size()) {
   int block_estimate = _code_size / 8;
 
-  _blocks =  new(_arena) GrowableArray<ciBlock *>(block_estimate);
+  _blocks =  new(_arena) GrowableArray<ciBlock *>(_arena, block_estimate, 0, NULL);
   int b2bsize = _code_size * sizeof(ciBlock **);
   _bci_to_block = (ciBlock **) arena->Amalloc(b2bsize);
   Copy::zero_to_words((HeapWord*) _bci_to_block, b2bsize / sizeof(HeapWord));
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -2591,7 +2591,7 @@
                                StateVector* temp_vector,
                                JsrSet* temp_set) {
   int dft_len = 100;
-  GrowableArray<Block*> stk(arena(), dft_len, 0, NULL);
+  GrowableArray<Block*> stk(dft_len);
 
   ciBlock* dummy = _methodBlocks->make_dummy_block();
   JsrSet* root_set = new JsrSet(NULL, 0);
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -62,6 +62,7 @@
   ClassFileStream cfs1 = *cfs0;
   ClassFileStream* cfs = &cfs1;
 #ifdef ASSERT
+  assert(cfs->allocated_on_stack(),"should be local");
   u1* old_current = cfs0->current();
 #endif
 
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -158,13 +158,18 @@
   // The line below is the worst bit of C++ hackery I've ever written
   // (Detlefs, 11/23).  You should think of it as equivalent to
   // "_regions(100, true)": initialize the growable array and inform it
-  // that it should allocate its elem array(s) on the C heap.  The first
-  // argument, however, is actually a comma expression (new-expr, 100).
-  // The purpose of the new_expr is to inform the growable array that it
-  // is *already* allocated on the C heap: it uses the placement syntax to
-  // keep it from actually doing any allocation.
-  _markedRegions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
-                                             (void*)&_markedRegions,
+  // that it should allocate its elem array(s) on the C heap.
+  //
+  // The first argument, however, is actually a comma expression
+  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
+  // set_allocation_type() call is to replace the default allocation
+  // type for embedded objects, STACK_OR_EMBEDDED, with C_HEAP. This
+  // allows the assert in GenericGrowableArray() to pass; it checks
+  // that a growable array object is on the C heap if its elements are.
+  //
+  // Note: the containing object is allocated on the C heap since it is a CHeapObj.
+  //
+  _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
                                              ResourceObj::C_HEAP),
                   100),
                  true),
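The comma-expression idiom the rewritten comment describes: evaluate a side effect, discard its value, and pass the right-hand operand on as the real constructor argument. A standalone illustration (all names invented):

    #include <cstdio>

    static int tag_as_c_heap(const char* who) {     // stand-in for set_allocation_type()
        std::printf("tagging %s before construction\n", who);
        return 0;                                   // value discarded by the comma
    }

    struct RegionArray {
        int _initial;
        explicit RegionArray(int initial) : _initial(initial) {}
    };

    struct Chooser {
        RegionArray _markedRegions;
        // (tag_as_c_heap(...), 100): run the side effect, then yield 100.
        Chooser() : _markedRegions((tag_as_c_heap("_markedRegions"), 100)) {}
    };

    int main() { Chooser c; return c._markedRegions._initial == 100 ? 0 : 1; }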
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -42,14 +42,19 @@
   // The line below is the worst bit of C++ hackery I've ever written
   // (Detlefs, 11/23).  You should think of it as equivalent to
   // "_regions(100, true)": initialize the growable array and inform it
-  // that it should allocate its elem array(s) on the C heap.  The first
-  // argument, however, is actually a comma expression (new-expr, 100).
-  // The purpose of the new_expr is to inform the growable array that it
-  // is *already* allocated on the C heap: it uses the placement syntax to
-  // keep it from actually doing any allocation.
-  _regions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
-                                       (void*)&_regions,
-                                       ResourceObj::C_HEAP),
+  // that it should allocate its elem array(s) on the C heap.
+  //
+  // The first argument, however, is actually a comma expression
+  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
+  // set_allocation_type() call is to replace the default allocation
+  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
+  // allow to pass the assert in GenericGrowableArray() which checks
+  // that a growable array object must be on C heap if elements are.
+  //
+  // Note: containing object is allocated on C heap since it is CHeapObj.
+  //
+  _regions((ResourceObj::set_allocation_type((address)&_regions,
+                                             ResourceObj::C_HEAP),
             (int)max_size),
            true),
   _next_rr_candidate(0),
--- a/hotspot/src/share/vm/memory/allocation.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -43,24 +43,73 @@
   switch (type) {
    case C_HEAP:
     res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
+    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
     break;
    case RESOURCE_AREA:
+    // new(size) sets allocation type RESOURCE_AREA.
     res = (address)operator new(size);
     break;
    default:
     ShouldNotReachHere();
   }
-  // Set allocation type in the resource object for assertion checks.
-  DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
   return res;
 }
 
 void ResourceObj::operator delete(void* p) {
   assert(((ResourceObj *)p)->allocated_on_C_heap(),
          "delete only allowed for C_HEAP objects");
+  DEBUG_ONLY(((ResourceObj *)p)->_allocation = badHeapOopVal;)
   FreeHeap(p);
 }
 
+#ifdef ASSERT
+void ResourceObj::set_allocation_type(address res, allocation_type type) {
+    // Set allocation type in the resource object
+    uintptr_t allocation = (uintptr_t)res;
+    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
+    assert(type <= allocation_mask, "incorrect allocation type");
+    ((ResourceObj *)res)->_allocation = ~(allocation + type);
+}
+
+ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
+    assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
+    return (allocation_type)((~_allocation) & allocation_mask);
+}
+
+ResourceObj::ResourceObj() { // default constructor
+    if (~(_allocation | allocation_mask) != (uintptr_t)this) {
+      set_allocation_type((address)this, STACK_OR_EMBEDDED);
+    } else if (allocated_on_stack()) {
+      // The encoded value looks like a valid stack allocation.
+      // Pass only if this object really is on the current thread's stack.
+      assert(Thread::current()->on_local_stack((address)this),"should be on stack");
+    } else {
+      assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
+             "allocation_type should be set by operator new()");
+    }
+}
+
+ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
+    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
+    set_allocation_type((address)this, STACK_OR_EMBEDDED);
+}
+
+ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
+    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
+    assert(allocated_on_stack(), "copy only into local");
+    // Keep current _allocation value;
+    return *this;
+}
+
+ResourceObj::~ResourceObj() {
+    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
+    if (!allocated_on_C_heap()) {  // ResourceObj::delete() zaps _allocation for C_heap.
+      _allocation = badHeapOopVal; // zap type
+    }
+}
+#endif // ASSERT
+
+
 void trace_heap_malloc(size_t size, const char* name, void* p) {
   // A lock is not needed here - tty uses a lock internally
   tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
--- a/hotspot/src/share/vm/memory/allocation.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/memory/allocation.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -317,32 +317,36 @@
 // use delete to deallocate.
 class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  enum allocation_type { UNKNOWN = 0, C_HEAP, RESOURCE_AREA, ARENA };
+  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
+  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
 #ifdef ASSERT
  private:
-  allocation_type _allocation;
+  // When this object is allocated on the stack, operator new() is not
+  // called, but stack garbage may look like a valid allocation_type.
+  // Store the negated 'this' pointer when new() is called to distinguish cases.
+  uintptr_t _allocation;
  public:
-  bool allocated_on_C_heap()    { return _allocation == C_HEAP; }
+  allocation_type get_allocation_type() const;
+  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
+  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
+  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
+  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
+  ResourceObj(); // default constructor
+  ResourceObj(const ResourceObj& r); // default copy constructor
+  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
+  ~ResourceObj();
 #endif // ASSERT
 
  public:
   void* operator new(size_t size, allocation_type type);
   void* operator new(size_t size, Arena *arena) {
       address res = (address)arena->Amalloc(size);
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = ARENA;)
+      DEBUG_ONLY(set_allocation_type(res, ARENA);)
       return res;
   }
   void* operator new(size_t size) {
       address res = (address)resource_allocate_bytes(size);
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = RESOURCE_AREA;)
-      return res;
-  }
-  void* operator new(size_t size, void* where, allocation_type type) {
-      void* res = where;
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
+      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
       return res;
   }
   void  operator delete(void* p);
--- a/hotspot/src/share/vm/opto/block.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/block.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -353,7 +353,8 @@
 PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
   Phase(CFG),
   _bbs(a),
-  _root(r)
+  _root(r),
+  _node_latency(NULL)
 #ifndef PRODUCT
   , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
 #endif
--- a/hotspot/src/share/vm/opto/block.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/block.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -374,7 +374,7 @@
   float _outer_loop_freq;       // Outmost loop frequency
 
   // Per node latency estimation, valid only during GCM
-  GrowableArray<uint> _node_latency;
+  GrowableArray<uint> *_node_latency;
 
 #ifndef PRODUCT
   bool _trace_opto_pipelining;  // tracing flag
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -281,6 +281,12 @@
   product(bool, InsertMemBarAfterArraycopy, true,                           \
           "Insert memory barrier after arraycopy call")                     \
                                                                             \
+  develop(bool, SubsumeLoads, true,                                         \
+          "Attempt to compile while subsuming loads into machine instructions.") \
+                                                                            \
+  develop(bool, StressRecompilation, false,                                 \
+          "Recompile each compiled method without subsuming loads or escape analysis.") \
+                                                                            \
   /* controls for tier 1 compilations */                                    \
                                                                             \
   develop(bool, Tier1CountInvocations, true,                                \
--- a/hotspot/src/share/vm/opto/c2compiler.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/c2compiler.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -103,13 +103,14 @@
   if (!is_initialized()) {
     initialize();
   }
-  bool subsume_loads = true;
+  bool subsume_loads = SubsumeLoads;
   bool do_escape_analysis = DoEscapeAnalysis &&
     !env->jvmti_can_access_local_variables();
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
     Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
 
+
     // Check result and retry if appropriate.
     if (C.failure_reason() != NULL) {
       if (C.failure_reason_is(retry_no_subsuming_loads())) {
@@ -127,6 +128,16 @@
       // on the ciEnv via env->record_method_not_compilable().
       env->record_failure(C.failure_reason());
     }
+    if (StressRecompilation) {
+      if (subsume_loads) {
+        subsume_loads = false;
+        continue;  // retry
+      }
+      if (do_escape_analysis) {
+        do_escape_analysis = false;
+        continue;  // retry
+      }
+    }
 
     // No retry; just break the loop.
     break;
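The control flow StressRecompilation adds, reduced to its skeleton: after a compile, loop back twice, first dropping subsume_loads and then dropping escape analysis, so each method is compiled under all three configurations. A hedged sketch (compile_once is a stand-in for constructing a Compile):

    #include <cstdio>

    static void compile_once(bool subsume_loads, bool do_escape_analysis) {
        std::printf("compile: subsume_loads=%d do_escape_analysis=%d\n",
                    subsume_loads, do_escape_analysis);
    }

    int main() {
        const bool StressRecompilation = true;      // develop flag above
        bool subsume_loads      = true;             // SubsumeLoads
        bool do_escape_analysis = true;             // DoEscapeAnalysis
        for (;;) {
            compile_once(subsume_loads, do_escape_analysis);
            if (StressRecompilation) {
                if (subsume_loads)      { subsume_loads = false;      continue; }
                if (do_escape_analysis) { do_escape_analysis = false; continue; }
            }
            break;                                  // no retry
        }
        return 0;
    }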
--- a/hotspot/src/share/vm/opto/chaitin.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/chaitin.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -569,7 +569,7 @@
         if (trace_spilling() && lrg._def != NULL) {
           // collect defs for MultiDef printing
           if (lrg._defs == NULL) {
-            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>();
+            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
             lrg._defs->append(lrg._def);
           }
           lrg._defs->append(n);
--- a/hotspot/src/share/vm/opto/compile.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/compile.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -904,8 +904,8 @@
   probe_alias_cache(NULL)->_index = AliasIdxTop;
 
   _intrinsics = NULL;
-  _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
-  _predicate_opaqs = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
+  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
+  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   register_library_intrinsics();
 }
 
--- a/hotspot/src/share/vm/opto/gcm.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/gcm.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -841,7 +841,7 @@
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
-               n->_idx, _node_latency.at_grow(n->_idx));
+               n->_idx, _node_latency->at_grow(n->_idx));
     dump();
   }
 #endif
@@ -853,7 +853,7 @@
     return;
 
   uint nlen = n->len();
-  uint use_latency = _node_latency.at_grow(n->_idx);
+  uint use_latency = _node_latency->at_grow(n->_idx);
   uint use_pre_order = _bbs[n->_idx]->_pre_order;
 
   for ( uint j=0; j<nlen; j++ ) {
@@ -884,15 +884,15 @@
     uint delta_latency = n->latency(j);
     uint current_latency = delta_latency + use_latency;
 
-    if (_node_latency.at_grow(def->_idx) < current_latency) {
-      _node_latency.at_put_grow(def->_idx, current_latency);
+    if (_node_latency->at_grow(def->_idx) < current_latency) {
+      _node_latency->at_put_grow(def->_idx, current_latency);
     }
 
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
                     use_latency, j, delta_latency, current_latency, def->_idx,
-                    _node_latency.at_grow(def->_idx));
+                    _node_latency->at_grow(def->_idx));
     }
 #endif
   }
@@ -926,7 +926,7 @@
       return 0;
 
     uint nlen = use->len();
-    uint nl = _node_latency.at_grow(use->_idx);
+    uint nl = _node_latency->at_grow(use->_idx);
 
     for ( uint j=0; j<nlen; j++ ) {
       if (use->in(j) == n) {
@@ -962,7 +962,7 @@
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
-               n->_idx, _node_latency.at_grow(n->_idx));
+               n->_idx, _node_latency->at_grow(n->_idx));
     dump();
   }
 #endif
@@ -975,7 +975,7 @@
     if (latency < l) latency = l;
   }
 
-  _node_latency.at_put_grow(n->_idx, latency);
+  _node_latency->at_put_grow(n->_idx, latency);
 }
 
 //------------------------------hoist_to_cheaper_block-------------------------
@@ -985,9 +985,9 @@
   const double delta = 1+PROB_UNLIKELY_MAG(4);
   Block* least       = LCA;
   double least_freq  = least->_freq;
-  uint target        = _node_latency.at_grow(self->_idx);
-  uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
-  uint end_latency   = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
+  uint target        = _node_latency->at_grow(self->_idx);
+  uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
+  uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   bool in_latency    = (target <= start_latency);
   const Block* root_block = _bbs[_root->_idx];
 
@@ -1005,7 +1005,7 @@
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# Find cheaper block for latency %d: ",
-      _node_latency.at_grow(self->_idx));
+      _node_latency->at_grow(self->_idx));
     self->dump();
     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
       LCA->_pre_order,
@@ -1032,9 +1032,9 @@
     if (mach && LCA == root_block)
       break;
 
-    uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
+    uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
     uint end_idx   = LCA->end_idx();
-    uint end_lat   = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
+    uint end_lat   = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
     double LCA_freq = LCA->_freq;
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
@@ -1073,7 +1073,7 @@
       tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
     }
 #endif
-    _node_latency.at_put_grow(self->_idx, end_latency);
+    _node_latency->at_put_grow(self->_idx, end_latency);
     partial_latency_of_defs(self);
   }
 
@@ -1255,8 +1255,7 @@
 
   // Compute the latency information (via backwards walk) for all the
   // instructions in the graph
-  GrowableArray<uint> node_latency;
-  _node_latency = node_latency;
+  _node_latency = new GrowableArray<uint>(); // resource_area allocation
 
   if( C->do_scheduling() )
     ComputeLatenciesBackwards(visited, stack);
@@ -1341,6 +1340,8 @@
     }
   }
 #endif
+  // Dead.
+  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
 }
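Since _node_latency is now a resource-area allocation that dies with the compilation phase, the code parks the pointer at a recognizable poison value; a stale dereference then faults immediately instead of silently reading freed memory. The idiom in isolation (LatencyArray is an opaque stand-in):

    struct LatencyArray;                            // opaque stand-in

    static LatencyArray* _node_latency;

    static void global_code_motion_done() {
        // Dead: backing storage is gone; make stale uses fault loudly.
        _node_latency = (LatencyArray*)0xdeadbeef;
    }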
 
 
--- a/hotspot/src/share/vm/opto/lcm.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -461,7 +461,7 @@
       n_choice = 1;
     }
 
-    uint n_latency = cfg->_node_latency.at_grow(n->_idx);
+    uint n_latency = cfg->_node_latency->at_grow(n->_idx);
     uint n_score   = n->req();   // Many inputs get high score to break ties
 
     // Keep best latency found
@@ -738,7 +738,7 @@
         Node     *n = _nodes[j];
         int     idx = n->_idx;
         tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
-        tty->print("latency:%3d  ", cfg->_node_latency.at_grow(idx));
+        tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
         tty->print("%4d: %s\n", idx, n->Name());
       }
     }
@@ -765,7 +765,7 @@
 #ifndef PRODUCT
     if (cfg->trace_opto_pipelining()) {
       tty->print("#    select %d: %s", n->_idx, n->Name());
-      tty->print(", latency:%d", cfg->_node_latency.at_grow(n->_idx));
+      tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
       n->dump();
       if (Verbose) {
         tty->print("#   ready list:");
--- a/hotspot/src/share/vm/opto/macro.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/macro.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -720,7 +720,7 @@
       if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
         if (!elem_type->is_loaded()) {
           field_type = TypeInstPtr::BOTTOM;
-        } else if (field != NULL && field->is_constant()) {
+        } else if (field != NULL && field->is_constant() && field->is_static()) {
           // This can happen if the constant oop is non-perm.
           ciObject* con = field->constant_value().as_object();
           // Do not "join" in the previous type; it doesn't add value,
--- a/hotspot/src/share/vm/opto/output.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/opto/output.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -382,6 +382,10 @@
           if (min_offset_from_last_call == 0) {
             blk_size += nop_size;
           }
+        } else if (mach->ideal_Opcode() == Op_Jump) {
+          const_size += b->_num_succs; // Address table size
+          // The size is valid even for 64 bit since it is
+          // multiplied by 2*jintSize when this method exits.
         }
       }
       min_offset_from_last_call += inst_size;
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -2442,6 +2442,10 @@
           "Call fatal if this exception is thrown.  Example: "              \
           "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \
                                                                             \
+  notproduct(ccstr, AbortVMOnExceptionMessage, NULL,                        \
+          "Call fatal if the exception pointed by AbortVMOnException "      \
+          "has this message.")                                              \
+                                                                            \
   develop(bool, DebugVtables, false,                                        \
           "add debugging code to vtable dispatch")                          \
                                                                             \
--- a/hotspot/src/share/vm/runtime/thread.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -807,7 +807,7 @@
 // should be revisited, and they should be removed if possible.
 
 bool Thread::is_lock_owned(address adr) const {
-  return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
+  return on_local_stack(adr);
 }
 
 bool Thread::set_as_starting_thread() {
--- a/hotspot/src/share/vm/runtime/thread.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/runtime/thread.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -446,6 +446,11 @@
   void    set_stack_size(size_t size)  { _stack_size = size; }
   void    record_stack_base_and_size();
 
+  bool    on_local_stack(address adr) const {
+    /* QQQ this has knowledge of direction, ought to be a stack method */
+    return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
+  }
+
   int     lgrp_id() const                 { return _lgrp_id; }
   void    set_lgrp_id(int value)          { _lgrp_id = value; }
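on_local_stack() encodes the usual downward-growth containment test: an address belongs to the thread's stack iff base >= adr >= base - size (the QQQ comment notes the growth direction is baked in). In isolation, with plain parameters standing in for the Thread fields:

    #include <cstddef>

    // Stacks grow down from stack_base toward stack_base - stack_size.
    static bool on_local_stack(const char* stack_base, size_t stack_size,
                               const char* adr) {
        return stack_base >= adr && adr >= stack_base - stack_size;
    }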
 
--- a/hotspot/src/share/vm/utilities/exceptions.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -117,7 +117,7 @@
                   (address)h_exception(), file, line, thread);
   }
   // for AbortVMOnException flag
-  NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));
+  NOT_PRODUCT(Exceptions::debug_check_abort(h_exception, message));
 
   // Check for special boot-strapping/vm-thread handling
   if (special_exception(thread, file, line, h_exception)) return;
@@ -375,17 +375,26 @@
 
 #ifndef PRODUCT
 // caller frees value_string if necessary
-void Exceptions::debug_check_abort(const char *value_string) {
+void Exceptions::debug_check_abort(const char *value_string, const char* message) {
   if (AbortVMOnException != NULL && value_string != NULL &&
       strstr(value_string, AbortVMOnException)) {
-    fatal(err_msg("Saw %s, aborting", value_string));
+    if (AbortVMOnExceptionMessage == NULL || message == NULL ||
+        strcmp(message, AbortVMOnExceptionMessage) == 0) {
+      fatal(err_msg("Saw %s, aborting", value_string));
+    }
   }
 }
 
-void Exceptions::debug_check_abort(Handle exception) {
+void Exceptions::debug_check_abort(Handle exception, const char* message) {
   if (AbortVMOnException != NULL) {
     ResourceMark rm;
-    debug_check_abort(instanceKlass::cast(exception()->klass())->external_name());
+    if (message == NULL && exception->is_a(SystemDictionary::Throwable_klass())) {
+      oop msg = java_lang_Throwable::message(exception);
+      if (msg != NULL) {
+        message = java_lang_String::as_utf8_string(msg);
+      }
+    }
+    debug_check_abort(instanceKlass::cast(exception()->klass())->external_name(), message);
   }
 }
 #endif
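The two-level filter now implemented: the exception's class name is matched by substring against AbortVMOnException, and, when AbortVMOnExceptionMessage is also set, the detail message must match exactly before the VM aborts. A standalone sketch of just the matching logic (parameter names stand in for the flags):

    #include <cstring>
    #include <cstdio>

    static void debug_check_abort(const char* name, const char* message,
                                  const char* abort_on,        // AbortVMOnException
                                  const char* abort_on_msg) {  // AbortVMOnExceptionMessage
        if (abort_on != NULL && name != NULL && strstr(name, abort_on)) {
            if (abort_on_msg == NULL || message == NULL ||
                strcmp(message, abort_on_msg) == 0) {
                std::printf("Saw %s, aborting\n", name);   // fatal(...) in the VM
            }
        }
    }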
--- a/hotspot/src/share/vm/utilities/exceptions.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/utilities/exceptions.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -143,8 +143,8 @@
   static void throw_stack_overflow_exception(Thread* thread, const char* file, int line);
 
   // for AbortVMOnException flag
-  NOT_PRODUCT(static void debug_check_abort(Handle exception);)
-  NOT_PRODUCT(static void debug_check_abort(const char *value_string);)
+  NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
+  NOT_PRODUCT(static void debug_check_abort(const char *value_string, const char* message = NULL);)
 };
 
 
--- a/hotspot/src/share/vm/utilities/growableArray.hpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/utilities/growableArray.hpp	Mon Aug 09 17:51:56 2010 -0700
@@ -97,7 +97,10 @@
     assert(_len >= 0 && _len <= _max, "initial_len too big");
     _arena = (c_heap ? (Arena*)1 : NULL);
     set_nesting();
-    assert(!c_heap || allocated_on_C_heap(), "growable array must be on C heap if elements are");
+    assert(!on_C_heap() || allocated_on_C_heap(), "growable array must be on C heap if elements are");
+    assert(!on_stack() ||
+           (allocated_on_res_area() || allocated_on_stack()),
+           "growable array must be on stack if elements are not on arena and not on C heap");
   }
 
   // This GA will use the given arena for storage.
@@ -108,6 +111,10 @@
     assert(_len >= 0 && _len <= _max, "initial_len too big");
     _arena = arena;
     assert(on_arena(), "arena has taken on reserved value 0 or 1");
+    // Relax the next assert to allow object allocation on the resource area,
+    // on the stack, or embedded into another object.
+    assert(allocated_on_arena() || allocated_on_stack(),
+           "growable array must be on arena or on stack if elements are on arena");
   }
 
   void* raw_allocate(int elementSize);
--- a/hotspot/src/share/vm/utilities/vmError.cpp	Tue Aug 03 08:13:38 2010 -0400
+++ b/hotspot/src/share/vm/utilities/vmError.cpp	Mon Aug 09 17:51:56 2010 -0700
@@ -479,8 +479,8 @@
 
        if (fr.sp()) {
          st->print(",  sp=" PTR_FORMAT, fr.sp());
-         st->print(",  free space=%" INTPTR_FORMAT "k",
-                     ((intptr_t)fr.sp() - (intptr_t)stack_bottom) >> 10);
+         size_t free_stack_size = pointer_delta(fr.sp(), stack_bottom, 1024);
+         st->print(",  free space=" SIZE_FORMAT "k", free_stack_size);
        }
 
        st->cr();